From 6cc41f92f8adfabe3df4fab7780f56830b408eec Mon Sep 17 00:00:00 2001 From: Kaviraj Date: Tue, 19 Jan 2021 10:10:19 +0100 Subject: [PATCH] Gcplog targetmanager (#3083) * Add new target manager(PubsubTarget) for promtail Lets you to scrape log entries from pubsub topic and send it to lok - Basic `Target` and `TargetManager` - Minimum config to make it work * make the receive loop on pubsub manager work * Add mmock pubsub server for testing * Remove debugging logs * Minor changes to messages pushing into loki * Basic formatter to convert GCP log entry to loki log entry * Reverting go.mod, go.sum and vendors to master branch * only pubsub dependencies * rebuild proto * Fix protobuf diffs * Remove debug printfs * Add basic formatter for GCP log entry - Create uniq label to create stream without out-of-order timestamp - extract instance id and timestamp from log entry * - Small fixes to formatter - Config changes on pubsub manager for testing * - Formatting changes - Rewrite timestamp to avoid out-of-order errors on loki - Minor config changes to Pubsub Target manager * Fix target formatter tests * PR remarks * Minor refactoring and following changes - Rename Pubsub* entities to Gcplog* - Rename pubsub package into gcplog - Add flag `KeepIncomingTimestamp` to toggle between either keeping incoming timestamps or rewriting with current timestamp - Tests for gcplog target - Docs and basic metrics to gcplog target * Add docs for cloud configure for gcplog. * Add docs for cloud configure for gcplog. * Review feedback * Checkin generated files * Making linters happy Co-authored-by: Edward Welch --- .../promtail-local-pubsub-config.yaml | 18 + docs/sources/clients/promtail/gcplog-cloud.md | 64 + docs/sources/clients/promtail/scraping.md | 22 + go.mod | 2 + go.sum | 1 + pkg/distributor/http.go | 68 +- pkg/promtail/scrapeconfig/scrapeconfig.go | 18 + pkg/promtail/targets/file/filetarget.go | 11 +- pkg/promtail/targets/gcplog/formatter.go | 90 + pkg/promtail/targets/gcplog/formatter_test.go | 98 + pkg/promtail/targets/gcplog/target.go | 169 + pkg/promtail/targets/gcplog/target_test.go | 234 + pkg/promtail/targets/gcplog/targetmanager.go | 80 + pkg/promtail/targets/manager.go | 14 + pkg/promtail/targets/target/target.go | 3 + .../go/internal/testutil/cmp.go | 61 + .../go/internal/testutil/context.go | 152 + .../go/internal/testutil/headers_enforcer.go | 187 + .../go/internal/testutil/rand.go | 44 + .../go/internal/testutil/server.go | 135 + .../go/internal/testutil/trace.go | 78 + vendor/cloud.google.com/go/pubsub/CHANGES.md | 36 + vendor/cloud.google.com/go/pubsub/LICENSE | 202 + vendor/cloud.google.com/go/pubsub/README.md | 46 + .../cloud.google.com/go/pubsub/apiv1/doc.go | 100 + .../cloud.google.com/go/pubsub/apiv1/iam.go | 36 + .../go/pubsub/apiv1/path_funcs.go | 95 + .../go/pubsub/apiv1/publisher_client.go | 534 ++ .../go/pubsub/apiv1/subscriber_client.go | 808 +++ vendor/cloud.google.com/go/pubsub/debug.go | 72 + vendor/cloud.google.com/go/pubsub/doc.go | 140 + .../go/pubsub/flow_controller.go | 122 + vendor/cloud.google.com/go/pubsub/go.mod | 19 + vendor/cloud.google.com/go/pubsub/go.sum | 373 ++ .../go/pubsub/go_mod_tidy_hack.go | 22 + .../internal/distribution/distribution.go | 79 + vendor/cloud.google.com/go/pubsub/iterator.go | 533 ++ vendor/cloud.google.com/go/pubsub/message.go | 120 + vendor/cloud.google.com/go/pubsub/nodebug.go | 25 + .../cloud.google.com/go/pubsub/pstest/fake.go | 1010 +++ vendor/cloud.google.com/go/pubsub/pubsub.go | 112 + .../cloud.google.com/go/pubsub/pullstream.go | 
189 + vendor/cloud.google.com/go/pubsub/service.go | 100 + vendor/cloud.google.com/go/pubsub/snapshot.go | 160 + .../go/pubsub/subscription.go | 801 +++ vendor/cloud.google.com/go/pubsub/topic.go | 558 ++ vendor/cloud.google.com/go/pubsub/trace.go | 217 + .../api/support/bundler/bundler.go | 407 ++ .../googleapis/pubsub/v1/pubsub.pb.go | 5912 +++++++++++++++++ vendor/modules.txt | 10 + 50 files changed, 14324 insertions(+), 63 deletions(-) create mode 100644 cmd/promtail/promtail-local-pubsub-config.yaml create mode 100644 docs/sources/clients/promtail/gcplog-cloud.md create mode 100644 pkg/promtail/targets/gcplog/formatter.go create mode 100644 pkg/promtail/targets/gcplog/formatter_test.go create mode 100644 pkg/promtail/targets/gcplog/target.go create mode 100644 pkg/promtail/targets/gcplog/target_test.go create mode 100644 pkg/promtail/targets/gcplog/targetmanager.go create mode 100644 vendor/cloud.google.com/go/internal/testutil/cmp.go create mode 100644 vendor/cloud.google.com/go/internal/testutil/context.go create mode 100644 vendor/cloud.google.com/go/internal/testutil/headers_enforcer.go create mode 100644 vendor/cloud.google.com/go/internal/testutil/rand.go create mode 100644 vendor/cloud.google.com/go/internal/testutil/server.go create mode 100644 vendor/cloud.google.com/go/internal/testutil/trace.go create mode 100644 vendor/cloud.google.com/go/pubsub/CHANGES.md create mode 100644 vendor/cloud.google.com/go/pubsub/LICENSE create mode 100644 vendor/cloud.google.com/go/pubsub/README.md create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/doc.go create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/iam.go create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go create mode 100644 vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go create mode 100644 vendor/cloud.google.com/go/pubsub/debug.go create mode 100644 vendor/cloud.google.com/go/pubsub/doc.go create mode 100644 vendor/cloud.google.com/go/pubsub/flow_controller.go create mode 100644 vendor/cloud.google.com/go/pubsub/go.mod create mode 100644 vendor/cloud.google.com/go/pubsub/go.sum create mode 100644 vendor/cloud.google.com/go/pubsub/go_mod_tidy_hack.go create mode 100644 vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go create mode 100644 vendor/cloud.google.com/go/pubsub/iterator.go create mode 100644 vendor/cloud.google.com/go/pubsub/message.go create mode 100644 vendor/cloud.google.com/go/pubsub/nodebug.go create mode 100644 vendor/cloud.google.com/go/pubsub/pstest/fake.go create mode 100644 vendor/cloud.google.com/go/pubsub/pubsub.go create mode 100644 vendor/cloud.google.com/go/pubsub/pullstream.go create mode 100644 vendor/cloud.google.com/go/pubsub/service.go create mode 100644 vendor/cloud.google.com/go/pubsub/snapshot.go create mode 100644 vendor/cloud.google.com/go/pubsub/subscription.go create mode 100644 vendor/cloud.google.com/go/pubsub/topic.go create mode 100644 vendor/cloud.google.com/go/pubsub/trace.go create mode 100644 vendor/google.golang.org/api/support/bundler/bundler.go create mode 100644 vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go diff --git a/cmd/promtail/promtail-local-pubsub-config.yaml b/cmd/promtail/promtail-local-pubsub-config.yaml new file mode 100644 index 000000000000..fc87540c71af --- /dev/null +++ b/cmd/promtail/promtail-local-pubsub-config.yaml @@ -0,0 +1,18 @@ +server: + http_listen_port: 9080 + grpc_listen_port: 0 + +positions: 
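+  # promtail records how far it has read each file target here, so a restart resumes where it left off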
+  filename: /tmp/positions.yaml
+
+clients:
+  - url: http://localhost:3100/loki/api/v1/push
+
+scrape_configs:
+  - job_name: pubsub-test
+    gcplog:
+      project_id: "grafanalabs-dev"
+      subscription: "dev-logs-pull"
+      use_incoming_timestamp: false # default rewrite timestamp.
+      labels:
+        job: pubsub-gcp
diff --git a/docs/sources/clients/promtail/gcplog-cloud.md b/docs/sources/clients/promtail/gcplog-cloud.md
new file mode 100644
index 000000000000..4b56431f2a42
--- /dev/null
+++ b/docs/sources/clients/promtail/gcplog-cloud.md
@@ -0,0 +1,64 @@
+---
+title: Cloud setup for gcplog TargetManager
+---
+This document explains how to set up Google Cloud Platform so that it forwards cloud resource logs from a particular GCP project into a Google Pub/Sub topic, where they are available for Loki's promtail to consume.
+
+It assumes that the reader has `gcloud` installed and has the required permissions (as mentioned in the [Roles and Permission](#roles-and-permission) section).
+
+# Roles and Permission
+The user should have the following roles to complete the setup:
+- "roles/pubsub.editor"
+- "roles/logging.configWriter"
+
+# Setup Pubsub Topic
+A Google Pub/Sub topic acts as the queue that persists log messages until they are read by `promtail`.
+
+```bash
+$ gcloud pubsub topics create $TOPIC_ID
+```
+
+e.g.:
+```bash
+$ gcloud pubsub topics create cloud-logs
+```
+
+# Setup Log Router
+We create a log sink to forward cloud logs into the Pub/Sub topic created above:
+
+```bash
+$ gcloud beta logging sinks create $SINK_NAME $SINK_LOCATION $OPTIONAL_FLAGS
+```
+
+e.g.:
+```bash
+$ gcloud beta logging sinks create cloud-logs pubsub.googleapis.com/projects/my-project/topics/cloud-logs \
+--log-filter='resource.type=("gcs_bucket")' \
+--description="Cloud logs"
+```
+
+The command above also sets the `log-filter` option, which determines what type of logs end up in the destination Pub/Sub topic.
+For more information on `log-filter`, refer to this [document](https://cloud.google.com/logging/docs/export/configure_export_v2#creating_sink).
+
+# Create Pubsub subscription for Loki
+We create a subscription for the Pub/Sub topic created above; `promtail` uses this subscription to consume log messages.
+
+```bash
+$ gcloud pubsub subscriptions create cloud-logs --topic=$TOPIC_ID \
+--ack-deadline=$ACK_DEADLINE \
+--message-retention-duration=$RETENTION_DURATION
+```
+
+e.g.:
+```bash
+$ gcloud pubsub subscriptions create cloud-logs --topic=pubsub.googleapis.com/projects/my-project/topics/cloud-logs \
+--ack-deadline=10s \
+--message-retention-duration=7d
+```
+
+For more fine-grained options, refer to `gcloud pubsub subscriptions --help`.
+
+# ServiceAccount for Promtail
+We need a service account with the following permissions:
+- pubsub.subscriber
+
+This enables promtail to read log entries from the Pub/Sub subscription created above.
diff --git a/docs/sources/clients/promtail/scraping.md b/docs/sources/clients/promtail/scraping.md
index c129ce113105..663cd657dcae 100644
--- a/docs/sources/clients/promtail/scraping.md
+++ b/docs/sources/clients/promtail/scraping.md
@@ -154,6 +154,28 @@ Keep in mind that labels prefixed with `__` will be dropped, so relabeling is re
     target_label: syslog_identifier
 ```
 
+## Gcplog scraping
+Promtail supports scraping cloud resource logs (such as GCS bucket logs, load balancer logs, and Kubernetes cluster logs) from GCP.
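+
+Before the `gcplog` target (configured below) can pull anything, promtail needs GCP credentials that carry the `pubsub.subscriber` permission described in the cloud setup document above. A minimal sketch of one common way to supply them, via Application Default Credentials (the key path below is illustrative, not a promtail setting):
+
+```bash
+# Point the Google Cloud client libraries at a service-account key file.
+export GOOGLE_APPLICATION_CREDENTIALS=/etc/promtail/gcp-sa-key.json
+
+# Start promtail with a config that contains a gcplog scrape_config.
+./promtail -config.file=promtail-local-pubsub-config.yaml
+```
+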
+Configs are set in `gcplog` section in `scrape_config` + +```yaml + - job_name: gcplog + gcplog: + project_id: "my-gcp-project" + subscription: "my-pubsub-subscription" + use_incoming_timestamp: false # default rewrite timestamps. + labels: + job: "gcplog" +``` +Here `project_id` and `subscription` are the only required fields. + +- `project_id` is the GCP project id. +- `subscription` is the GCP pubsub subscription where promtail can consume log entries from. + +Before using `gcplog` target, GCP should be [configured](../gcplog-cloud) with pubsub subscription to receive logs from. + +It also support `relabeling` and `pipeline` stages just like other targets. + ## Syslog Receiver Promtail supports receiving [IETF Syslog (RFC5424)](https://tools.ietf.org/html/rfc5424) diff --git a/go.mod b/go.mod index bac0368e09d5..e0fb481ba23c 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/grafana/loki go 1.15 require ( + cloud.google.com/go/pubsub v1.3.1 github.com/NYTimes/gziphandler v1.1.1 github.com/aws/aws-lambda-go v1.17.0 github.com/blang/semver v3.5.1+incompatible // indirect @@ -61,6 +62,7 @@ require ( go.uber.org/atomic v1.7.0 golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 golang.org/x/net v0.0.0-20201110031124-69a78807bb2b + google.golang.org/api v0.35.0 google.golang.org/grpc v1.33.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/fsnotify.v1 v1.4.7 diff --git a/go.sum b/go.sum index cfdf14f064af..d1cfbb4494e6 100644 --- a/go.sum +++ b/go.sum @@ -39,6 +39,7 @@ cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2k cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1 h1:ukjixP1wl0LpnZ6LWtZJ0mX5tBmjp1f8Sqer8Z2OMUU= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.3.0/go.mod h1:9IAwXhoyBJ7z9LcAwkj0/7NnPzYaPeZxxVp3zm+5IqA= diff --git a/pkg/distributor/http.go b/pkg/distributor/http.go index 8935bd657134..b25e3961bf0f 100644 --- a/pkg/distributor/http.go +++ b/pkg/distributor/http.go @@ -4,12 +4,7 @@ import ( "math" "net/http" - "github.com/dustin/go-humanize" - "github.com/go-kit/kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" "github.com/weaveworks/common/httpgrpc" - "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/util" @@ -17,28 +12,15 @@ import ( "github.com/grafana/loki/pkg/logproto" "github.com/grafana/loki/pkg/logql/unmarshal" unmarshal_legacy "github.com/grafana/loki/pkg/logql/unmarshal/legacy" - lokiutil "github.com/grafana/loki/pkg/util" ) -var ( - contentType = http.CanonicalHeaderKey("Content-Type") - - bytesIngested = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: "loki", - Name: "distributor_bytes_received_total", - Help: "The total number of uncompressed bytes received per tenant", - }, []string{"tenant"}) - linesIngested = promauto.NewCounterVec(prometheus.CounterOpts{ - Namespace: "loki", - Name: "distributor_lines_received_total", - Help: "The total number of lines received per tenant", - }, []string{"tenant"}) -) +var contentType = http.CanonicalHeaderKey("Content-Type") const applicationJSON = "application/json" // 
PushHandler reads a snappy-compressed proto from the HTTP body. func (d *Distributor) PushHandler(w http.ResponseWriter, r *http.Request) { + req, err := ParseRequest(r) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) @@ -60,54 +42,16 @@ func (d *Distributor) PushHandler(w http.ResponseWriter, r *http.Request) { } func ParseRequest(r *http.Request) (*logproto.PushRequest, error) { - userID, _ := user.ExtractOrgID(r.Context()) - logger := util.WithContext(r.Context(), util.Logger) - body := lokiutil.NewSizeReader(r.Body) - contentType := r.Header.Get(contentType) var req logproto.PushRequest - defer func() { - var ( - entriesSize int64 - streamLabelsSize int64 - totalEntries int64 - ) - - for _, s := range req.Streams { - streamLabelsSize += int64(len(s.Labels)) - for _, e := range s.Entries { - totalEntries++ - entriesSize += int64(len(e.Line)) - } - } - - // incrementing tenant metrics if we have a tenant. - if totalEntries != 0 && userID != "" { - bytesIngested.WithLabelValues(userID).Add(float64(entriesSize)) - linesIngested.WithLabelValues(userID).Add(float64(totalEntries)) - } - - level.Debug(logger).Log( - "msg", "push request parsed", - "path", r.URL.Path, - "contentType", contentType, - "bodySize", humanize.Bytes(uint64(body.Size())), - "streams", len(req.Streams), - "entries", totalEntries, - "streamLabelsSize", humanize.Bytes(uint64(streamLabelsSize)), - "entriesSize", humanize.Bytes(uint64(entriesSize)), - "totalSize", humanize.Bytes(uint64(entriesSize+streamLabelsSize)), - ) - }() - - switch contentType { + switch r.Header.Get(contentType) { case applicationJSON: var err error if loghttp.GetVersion(r.RequestURI) == loghttp.VersionV1 { - err = unmarshal.DecodePushRequest(body, &req) + err = unmarshal.DecodePushRequest(r.Body, &req) } else { - err = unmarshal_legacy.DecodePushRequest(body, &req) + err = unmarshal_legacy.DecodePushRequest(r.Body, &req) } if err != nil { @@ -115,7 +59,7 @@ func ParseRequest(r *http.Request) (*logproto.PushRequest, error) { } default: - if err := util.ParseProtoReader(r.Context(), body, int(r.ContentLength), math.MaxInt32, &req, util.RawSnappy); err != nil { + if err := util.ParseProtoReader(r.Context(), r.Body, int(r.ContentLength), math.MaxInt32, &req, util.RawSnappy); err != nil { return nil, err } } diff --git a/pkg/promtail/scrapeconfig/scrapeconfig.go b/pkg/promtail/scrapeconfig/scrapeconfig.go index bef131b7d5ed..48cf909f3a7a 100644 --- a/pkg/promtail/scrapeconfig/scrapeconfig.go +++ b/pkg/promtail/scrapeconfig/scrapeconfig.go @@ -31,6 +31,7 @@ type Config struct { JobName string `yaml:"job_name,omitempty"` PipelineStages stages.PipelineStages `yaml:"pipeline_stages,omitempty"` JournalConfig *JournalTargetConfig `yaml:"journal,omitempty"` + GcplogConfig *GcplogTargetConfig `yaml:"gcplog,omitempty"` SyslogConfig *SyslogTargetConfig `yaml:"syslog,omitempty"` PushConfig *PushTargetConfig `yaml:"loki_push_api,omitempty"` RelabelConfigs []*relabel.Config `yaml:"relabel_configs,omitempty"` @@ -164,6 +165,23 @@ type SyslogTargetConfig struct { UseIncomingTimestamp bool `yaml:"use_incoming_timestamp"` } +// GcplogTargetConfig describes a scrape config to pull logs from any pubsub topic. +type GcplogTargetConfig struct { + // ProjectID is the Cloud project id + ProjectID string `yaml:"project_id"` + + // Subscription is the scription name we use to pull logs from a pubsub topic. + Subscription string `yaml:"subscription"` + + // Labels are the additional labels to be added to log entry while pushing it to Loki server. 
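+	// They are merged with the labels promtail extracts from the GCP log entry itself (logName, resource type and resource labels).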
+ Labels model.LabelSet `yaml:"labels"` + + // UseIncomingTimestamp represents whether to keep the timestamp same as actual log entry coming in or replace it with + // current timestamp at the time of processing. + // Its default value(`false`) denotes, replace it with current timestamp at the time of processing. + UseIncomingTimestamp bool `yaml:"use_incoming_timestamp"` +} + // PushTargetConfig describes a scrape config that listens for Loki push messages. type PushTargetConfig struct { // Server is the weaveworks server config for listening connections diff --git a/pkg/promtail/targets/file/filetarget.go b/pkg/promtail/targets/file/filetarget.go index 8a380e91c6be..b32b3da10aa6 100644 --- a/pkg/promtail/targets/file/filetarget.go +++ b/pkg/promtail/targets/file/filetarget.go @@ -65,7 +65,16 @@ type FileTarget struct { } // NewFileTarget create a new FileTarget. -func NewFileTarget(metrics *Metrics, logger log.Logger, handler api.EntryHandler, positions positions.Positions, path string, labels model.LabelSet, discoveredLabels model.LabelSet, targetConfig *Config) (*FileTarget, error) { +func NewFileTarget( + metrics *Metrics, + logger log.Logger, + handler api.EntryHandler, + positions positions.Positions, + path string, + labels model.LabelSet, + discoveredLabels model.LabelSet, + targetConfig *Config, +) (*FileTarget, error) { watcher, err := fsnotify.NewWatcher() if err != nil { diff --git a/pkg/promtail/targets/gcplog/formatter.go b/pkg/promtail/targets/gcplog/formatter.go new file mode 100644 index 000000000000..3ed786b1972c --- /dev/null +++ b/pkg/promtail/targets/gcplog/formatter.go @@ -0,0 +1,90 @@ +package gcplog + +import ( + "fmt" + "strings" + "time" + + "cloud.google.com/go/pubsub" + json "github.com/json-iterator/go" + "github.com/prometheus/common/model" + + "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/promtail/api" +) + +// LogEntry that will be written to the pubsub topic. +// According to the following spec. +// https://cloud.google.com/logging/docs/reference/v2/rest/v2/LogEntry +// nolint: golint +type GCPLogEntry struct { + LogName string `json:"logName"` + Resource struct { + Type string `json:"type"` + Labels map[string]string `json:"labels"` + } `json:"resource"` + Timestamp string `json:"timestamp"` + + // The time the log entry was received by Logging. + // Its important that `Timestamp` is optional in GCE log entry. + ReceiveTimestamp string `json:"receiveTimestamp"` + + TextPayload string `json:"textPayload"` + + // NOTE(kavi): There are other fields on GCPLogEntry. but we need only need above fields for now + // anyway we will be sending the entire entry to Loki. +} + +func format(m *pubsub.Message, other model.LabelSet, useIncomingTimestamp bool) (api.Entry, error) { + var ge GCPLogEntry + + if err := json.Unmarshal(m.Data, &ge); err != nil { + return api.Entry{}, err + } + + labels := model.LabelSet{ + "logName": model.LabelValue(ge.LogName), + "resourceType": model.LabelValue(ge.Resource.Type), + } + for k, v := range ge.Resource.Labels { + if !model.LabelName(k).IsValid() || !model.LabelValue(k).IsValid() { + continue + } + labels[model.LabelName(k)] = model.LabelValue(v) + } + + // add labels from config as well. 
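+	// NOTE: model.LabelSet.Merge gives precedence to its argument, so a label set in the scrape config overrides an extracted label of the same name.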
+ labels = labels.Merge(other) + + ts := time.Now() + line := string(m.Data) + + if useIncomingTimestamp { + tt := ge.Timestamp + if tt == "" { + tt = ge.ReceiveTimestamp + } + var err error + ts, err = time.Parse(time.RFC3339, tt) + if err != nil { + return api.Entry{}, fmt.Errorf("invalid timestamp format: %w", err) + } + + if ts.IsZero() { + return api.Entry{}, fmt.Errorf("no timestamp found in the log entry") + } + } + + // Send only `ge.textPaylload` as log line if its present. + if strings.TrimSpace(ge.TextPayload) != "" { + line = ge.TextPayload + } + + return api.Entry{ + Labels: labels, + Entry: logproto.Entry{ + Timestamp: ts, + Line: line, + }, + }, nil +} diff --git a/pkg/promtail/targets/gcplog/formatter_test.go b/pkg/promtail/targets/gcplog/formatter_test.go new file mode 100644 index 000000000000..dcca65ef1020 --- /dev/null +++ b/pkg/promtail/targets/gcplog/formatter_test.go @@ -0,0 +1,98 @@ +package gcplog + +import ( + "testing" + "time" + + "cloud.google.com/go/pubsub" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/grafana/loki/pkg/logproto" + "github.com/grafana/loki/pkg/promtail/api" +) + +func TestFormat(t *testing.T) { + cases := []struct { + name string + msg *pubsub.Message + labels model.LabelSet + useIncomingTimestamp bool + expected api.Entry + }{ + { + name: "use-original-timestamp", + msg: &pubsub.Message{ + Data: []byte(withAllFields), + }, + labels: model.LabelSet{ + "jobname": "pubsub-test", + }, + useIncomingTimestamp: true, + expected: api.Entry{ + Labels: model.LabelSet{ + "jobname": "pubsub-test", + "logName": "https://project/gcs", + "resourceType": "gcs", + "instanceId": "344555", + }, + Entry: logproto.Entry{ + Timestamp: mustTime(t, "2020-12-22T15:01:23.045123456Z"), + Line: withAllFields, + }, + }, + }, + { + name: "rewrite-timestamp", + msg: &pubsub.Message{ + Data: []byte(withAllFields), + }, + labels: model.LabelSet{ + "jobname": "pubsub-test", + }, + expected: api.Entry{ + Labels: model.LabelSet{ + "jobname": "pubsub-test", + "logName": "https://project/gcs", + "resourceType": "gcs", + "instanceId": "344555", + }, + Entry: logproto.Entry{ + Timestamp: time.Now(), + Line: withAllFields, + }, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + got, err := format(c.msg, c.labels, c.useIncomingTimestamp) + + require.NoError(t, err) + + assert.Equal(t, c.expected.Labels, got.Labels) + assert.Equal(t, c.expected.Line, got.Line) + if c.useIncomingTimestamp { + assert.Equal(t, c.expected.Entry.Timestamp, got.Timestamp) + } else { + if got.Timestamp.Sub(c.expected.Timestamp).Seconds() > 1 { + assert.Fail(t, "timestamp shouldn't differ much when rewriting log entry timestamp.") + } + } + }) + } +} + +func mustTime(t *testing.T, v string) time.Time { + t.Helper() + + ts, err := time.Parse(time.RFC3339, v) + require.NoError(t, err) + return ts +} + +const ( + withAllFields = `{"logName": "https://project/gcs", "resource": {"type": "gcs", "labels": {"instanceId": "344555"}}, "timestamp": "2020-12-22T15:01:23.045123456Z"}` +) diff --git a/pkg/promtail/targets/gcplog/target.go b/pkg/promtail/targets/gcplog/target.go new file mode 100644 index 000000000000..b4681e840c47 --- /dev/null +++ b/pkg/promtail/targets/gcplog/target.go @@ -0,0 +1,169 @@ +package gcplog + +import ( + "context" + + "cloud.google.com/go/pubsub" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + 
"github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/pkg/relabel" + + "github.com/grafana/loki/pkg/promtail/api" + "github.com/grafana/loki/pkg/promtail/scrapeconfig" + "github.com/grafana/loki/pkg/promtail/targets/target" +) + +var ( + gcplogEntries = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "promtail", + Name: "gcplog_target_entries_total", + Help: "Help number of successful entries sent to the gcplog target", + }, []string{"project"}) + + gcplogErrors = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: "promtail", + Name: "gcplog_target_parsing_errors_total", + Help: "Total number of parsing errors while receiving gcplog messages", + }, []string{"project"}) +) + +// GcplogTarget represents the target specific to GCP project. +// It collects logs from GCP and push it to Loki. +// nolint:golint +type GcplogTarget struct { + logger log.Logger + handler api.EntryHandler + config *scrapeconfig.GcplogTargetConfig + relabelConfig []*relabel.Config + jobName string + + // lifecycle management + ctx context.Context + cancel context.CancelFunc + + // pubsub + ps *pubsub.Client + msgs chan *pubsub.Message +} + +// NewGcplogTarget returns the new instannce of GcplogTarget for +// the given `project-id`. It scraps logs from the GCP project +// and push it Loki via given `api.EntryHandler.` +// It starts the `run` loop to consume log entries that can be +// stopped via `target.Stop()` +// nolint:golint,govet +func NewGcplogTarget( + logger log.Logger, + handler api.EntryHandler, + relabel []*relabel.Config, + jobName string, + config *scrapeconfig.GcplogTargetConfig, +) (*GcplogTarget, error) { + + ctx, cancel := context.WithCancel(context.Background()) + + ps, err := pubsub.NewClient(ctx, config.ProjectID) + if err != nil { + return nil, err + } + + target := newGcplogTarget(logger, handler, relabel, jobName, config, ps, ctx, cancel) + + go func() { + _ = target.run() + }() + + return target, nil +} + +// nolint: golint +func newGcplogTarget( + logger log.Logger, + handler api.EntryHandler, + relabel []*relabel.Config, + jobName string, + config *scrapeconfig.GcplogTargetConfig, + pubsubClient *pubsub.Client, + ctx context.Context, + cancel func(), +) *GcplogTarget { + + return &GcplogTarget{ + logger: logger, + handler: handler, + relabelConfig: relabel, + config: config, + jobName: jobName, + ctx: ctx, + cancel: cancel, + ps: pubsubClient, + msgs: make(chan *pubsub.Message), + } +} + +func (t *GcplogTarget) run() error { + send := t.handler.Chan() + + sub := t.ps.SubscriptionInProject(t.config.Subscription, t.config.ProjectID) + go func() { + // NOTE(kavi): `cancel` the context as exiting from this goroutine should stop main `run` loop + // It makesense as no more messages will be received. + defer t.cancel() + + err := sub.Receive(t.ctx, func(ctx context.Context, m *pubsub.Message) { + t.msgs <- m + }) + if err != nil { + // TODO(kavi): Add proper error propagation maybe? + level.Error(t.logger).Log("error", err) + gcplogErrors.WithLabelValues(t.config.ProjectID).Inc() + } + }() + + for { + select { + case <-t.ctx.Done(): + return t.ctx.Err() + case m := <-t.msgs: + entry, err := format(m, t.config.Labels, t.config.UseIncomingTimestamp) + if err != nil { + level.Error(t.logger).Log("event", "error formating log entry", "cause", err) + m.Ack() + break + } + send <- entry + m.Ack() // Ack only after log is sent. 
+ gcplogEntries.WithLabelValues(t.config.ProjectID).Inc() + } + } +} + +// sent implements target.Target. +func (t *GcplogTarget) Type() target.TargetType { + return target.GcplogTargetType +} + +func (t *GcplogTarget) Ready() bool { + return t.ctx.Err() == nil +} + +func (t *GcplogTarget) DiscoveredLabels() model.LabelSet { + return nil +} + +func (t *GcplogTarget) Labels() model.LabelSet { + return t.config.Labels +} + +func (t *GcplogTarget) Details() interface{} { + return nil +} + +func (t *GcplogTarget) Stop() error { + t.handler.Stop() + t.cancel() + return nil +} diff --git a/pkg/promtail/targets/gcplog/target_test.go b/pkg/promtail/targets/gcplog/target_test.go new file mode 100644 index 000000000000..997cf73393c8 --- /dev/null +++ b/pkg/promtail/targets/gcplog/target_test.go @@ -0,0 +1,234 @@ +package gcplog + +import ( + "context" + "sync" + "testing" + "time" + + "cloud.google.com/go/pubsub" + "cloud.google.com/go/pubsub/pstest" + "github.com/go-kit/kit/log" + "github.com/prometheus/common/model" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/api/option" + "google.golang.org/grpc" + + "github.com/grafana/loki/pkg/promtail/client/fake" + "github.com/grafana/loki/pkg/promtail/scrapeconfig" + "github.com/grafana/loki/pkg/promtail/targets/target" +) + +func TestGcplogTarget_Run(t *testing.T) { + // Goal: Check message written to pubsub topic is received by the target. + tt, apiclient, pubsubClient, teardown := testGcplogTarget(t) + defer teardown() + + // seed pubsub + ctx := context.Background() + tp, err := pubsubClient.CreateTopic(ctx, topic) + require.NoError(t, err) + _, err = pubsubClient.CreateSubscription(ctx, subscription, pubsub.SubscriptionConfig{ + Topic: tp, + }) + require.NoError(t, err) + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + _ = tt.run() + }() + + publishMessage(t, tp) + + // Wait till message is received by the run loop. + // NOTE(kavi): sleep is not ideal. but not other way to confirm if api.Handler received messages + time.Sleep(1 * time.Second) + + err = tt.Stop() + require.NoError(t, err) + + // wait till `run` stops. + wg.Wait() + + assert.Equal(t, 1, len(apiclient.Received())) +} + +func TestGcplogTarget_Stop(t *testing.T) { + // Goal: To test that `run()` stops when you invoke `target.Stop()` + + errs := make(chan error, 1) + + tt, _, _, teardown := testGcplogTarget(t) + defer teardown() + + var wg sync.WaitGroup + + wg.Add(1) + go func() { + defer wg.Done() + errs <- tt.run() + }() + + // invoke stop + _ = tt.Stop() + + // wait till run returns + wg.Wait() + + // wouldn't block as 1 error is buffered into the channel. 
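	// Stop() cancels the target's context, so run() exits its select loop and returns ctx.Err().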
+ err := <-errs + + // returned error should be cancelled context error + assert.Equal(t, tt.ctx.Err(), err) +} + +func TestGcplogTarget_Type(t *testing.T) { + tt, _, _, teardown := testGcplogTarget(t) + defer teardown() + + assert.Equal(t, target.TargetType("Gcplog"), tt.Type()) +} + +func TestGcplogTarget_Ready(t *testing.T) { + tt, _, _, teardown := testGcplogTarget(t) + defer teardown() + + assert.Equal(t, true, tt.Ready()) +} + +func TestGcplogTarget_Labels(t *testing.T) { + tt, _, _, teardown := testGcplogTarget(t) + defer teardown() + + assert.Equal(t, model.LabelSet{"job": "test-gcplogtarget"}, tt.Labels()) +} + +func testGcplogTarget(t *testing.T) (*GcplogTarget, *fake.Client, *pubsub.Client, func()) { + t.Helper() + + ctx, cancel := context.WithCancel(context.Background()) + + mockSvr := pstest.NewServer() + conn, err := grpc.Dial(mockSvr.Addr, grpc.WithInsecure()) + require.NoError(t, err) + + mockpubsubClient, err := pubsub.NewClient(ctx, testConfig.ProjectID, option.WithGRPCConn(conn)) + require.NoError(t, err) + + fakeClient := fake.New(func() {}) + + target := newGcplogTarget( + log.NewNopLogger(), + fakeClient, + nil, + "job-test-gcplogtarget", + testConfig, + mockpubsubClient, + ctx, + cancel, + ) + + // cleanup + return target, fakeClient, mockpubsubClient, func() { + cancel() + conn.Close() + mockSvr.Close() + } +} + +func publishMessage(t *testing.T, topic *pubsub.Topic) { + t.Helper() + + ctx := context.Background() + res := topic.Publish(ctx, &pubsub.Message{Data: []byte(gcpLogEntry)}) + _, err := res.Get(ctx) // wait till message is actully published + require.NoError(t, err) +} + +const ( + project = "test-project" + topic = "test-topic" + subscription = "test-subscription" + + gcpLogEntry = ` +{ + "insertId": "ajv4d1f1ch8dr", + "logName": "projects/grafanalabs-dev/logs/cloudaudit.googleapis.com%2Fdata_access", + "protoPayload": { + "@type": "type.googleapis.com/google.cloud.audit.AuditLog", + "authenticationInfo": { + "principalEmail": "1040409107725-compute@developer.gserviceaccount.com", + "serviceAccountDelegationInfo": [ + { + "firstPartyPrincipal": { + "principalEmail": "service-1040409107725@compute-system.iam.gserviceaccount.com" + } + } + ] + }, + "authorizationInfo": [ + { + "granted": true, + "permission": "storage.objects.list", + "resource": "projects/_/buckets/dev-us-central1-cortex-tsdb-dev", + "resourceAttributes": { + } + }, + { + "permission": "storage.objects.get", + "resource": "projects/_/buckets/dev-us-central1-cortex-tsdb-dev/objects/load-generator-20/01EM34PFBC2SCV3ETBGRAQZ090/deletion-mark.json", + "resourceAttributes": { + } + } + ], + "methodName": "storage.objects.get", + "requestMetadata": { + "callerIp": "34.66.19.193", + "callerNetwork": "//compute.googleapis.com/projects/grafanalabs-dev/global/networks/__unknown__", + "callerSuppliedUserAgent": "thanos-store-gateway/1.5.0 (go1.14.9),gzip(gfe)", + "destinationAttributes": { + }, + "requestAttributes": { + "auth": { + }, + "time": "2021-01-01T02:17:10.661405637Z" + } + }, + "resourceLocation": { + "currentLocations": [ + "us-central1" + ] + }, + "resourceName": "projects/_/buckets/dev-us-central1-cortex-tsdb-dev/objects/load-generator-20/01EM34PFBC2SCV3ETBGRAQZ090/deletion-mark.json", + "serviceName": "storage.googleapis.com", + "status": { + } + }, + "receiveTimestamp": "2021-01-01T02:17:10.82013623Z", + "resource": { + "labels": { + "bucket_name": "dev-us-central1-cortex-tsdb-dev", + "location": "us-central1", + "project_id": "grafanalabs-dev" + }, + "type": "gcs_bucket" + }, + 
"severity": "INFO", + "timestamp": "2021-01-01T02:17:10.655982344Z" +} +` +) + +var ( + testConfig = &scrapeconfig.GcplogTargetConfig{ + ProjectID: project, + Subscription: subscription, + Labels: model.LabelSet{ + "job": "test-gcplogtarget", + }, + } +) diff --git a/pkg/promtail/targets/gcplog/targetmanager.go b/pkg/promtail/targets/gcplog/targetmanager.go new file mode 100644 index 000000000000..6fcec0efb0af --- /dev/null +++ b/pkg/promtail/targets/gcplog/targetmanager.go @@ -0,0 +1,80 @@ +package gcplog + +import ( + "fmt" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + + "github.com/grafana/loki/pkg/logentry/stages" + "github.com/grafana/loki/pkg/promtail/api" + "github.com/grafana/loki/pkg/promtail/scrapeconfig" + "github.com/grafana/loki/pkg/promtail/targets/target" +) + +// nolint: golint +type GcplogTargetManager struct { + logger log.Logger + targets map[string]*GcplogTarget +} + +func NewGcplogTargetManager( + logger log.Logger, + client api.EntryHandler, + scrape []scrapeconfig.Config, +) (*GcplogTargetManager, error) { + tm := &GcplogTargetManager{ + logger: logger, + targets: make(map[string]*GcplogTarget), + } + + for _, cf := range scrape { + if cf.GcplogConfig == nil { + continue + } + registerer := prometheus.DefaultRegisterer + pipeline, err := stages.NewPipeline(log.With(logger, "component", "pubsub_pipeline"), cf.PipelineStages, &cf.JobName, registerer) + if err != nil { + return nil, err + } + + t, err := NewGcplogTarget(logger, pipeline.Wrap(client), cf.RelabelConfigs, cf.JobName, cf.GcplogConfig) + if err != nil { + return nil, fmt.Errorf("failed to create pubsub target: %w", err) + } + tm.targets[cf.JobName] = t + } + + return tm, nil +} + +func (tm *GcplogTargetManager) Ready() bool { + for _, t := range tm.targets { + if t.Ready() { + return true + } + } + return false +} + +func (tm *GcplogTargetManager) Stop() { + for name, t := range tm.targets { + if err := t.Stop(); err != nil { + level.Error(t.logger).Log("event", "failed to stop pubsub target", "name", name, "cause", err) + } + } +} + +func (tm *GcplogTargetManager) ActiveTargets() map[string][]target.Target { + // TODO(kavi): if someway to check if specific topic is active and store the state on the target struct? 
+ return tm.AllTargets() +} + +func (tm *GcplogTargetManager) AllTargets() map[string][]target.Target { + res := make(map[string][]target.Target, len(tm.targets)) + for k, v := range tm.targets { + res[k] = []target.Target{v} + } + return res +} diff --git a/pkg/promtail/targets/manager.go b/pkg/promtail/targets/manager.go index d6f33b5945d6..44eb8147852b 100644 --- a/pkg/promtail/targets/manager.go +++ b/pkg/promtail/targets/manager.go @@ -10,6 +10,7 @@ import ( "github.com/grafana/loki/pkg/promtail/positions" "github.com/grafana/loki/pkg/promtail/scrapeconfig" "github.com/grafana/loki/pkg/promtail/targets/file" + "github.com/grafana/loki/pkg/promtail/targets/gcplog" "github.com/grafana/loki/pkg/promtail/targets/journal" "github.com/grafana/loki/pkg/promtail/targets/lokipush" "github.com/grafana/loki/pkg/promtail/targets/stdin" @@ -21,6 +22,7 @@ const ( FileScrapeConfigs = "fileScrapeConfigs" JournalScrapeConfigs = "journalScrapeConfigs" SyslogScrapeConfigs = "syslogScrapeConfigs" + GcplogScrapeConfigs = "gcplogScrapeConfigs" PushScrapeConfigs = "pushScrapeConfigs" ) @@ -73,6 +75,8 @@ func NewTargetManagers( targetScrapeConfigs[JournalScrapeConfigs] = append(targetScrapeConfigs[JournalScrapeConfigs], cfg) case cfg.SyslogConfig != nil: targetScrapeConfigs[SyslogScrapeConfigs] = append(targetScrapeConfigs[SyslogScrapeConfigs], cfg) + case cfg.GcplogConfig != nil: + targetScrapeConfigs[GcplogScrapeConfigs] = append(targetScrapeConfigs[GcplogScrapeConfigs], cfg) case cfg.PushConfig != nil: targetScrapeConfigs[PushScrapeConfigs] = append(targetScrapeConfigs[PushScrapeConfigs], cfg) default: @@ -129,6 +133,16 @@ func NewTargetManagers( return nil, errors.Wrap(err, "failed to make syslog target manager") } targetManagers = append(targetManagers, syslogTargetManager) + case GcplogScrapeConfigs: + pubsubTargetManager, err := gcplog.NewGcplogTargetManager( + logger, + client, + scrapeConfigs, + ) + if err != nil { + return nil, errors.Wrap(err, "failed to make syslog target manager") + } + targetManagers = append(targetManagers, pubsubTargetManager) case PushScrapeConfigs: pushTargetManager, err := lokipush.NewPushTargetManager( reg, diff --git a/pkg/promtail/targets/target/target.go b/pkg/promtail/targets/target/target.go index 60dc3ea57f81..5b881f82a6a1 100644 --- a/pkg/promtail/targets/target/target.go +++ b/pkg/promtail/targets/target/target.go @@ -18,6 +18,9 @@ const ( // SyslogTargetType is a syslog target SyslogTargetType = TargetType("Syslog") + // GcplogTargetType is a target where log entries are pulled from pubsub topic. + GcplogTargetType = TargetType("Gcplog") + // DroppedTargetType is a target that's been dropped. DroppedTargetType = TargetType("dropped") diff --git a/vendor/cloud.google.com/go/internal/testutil/cmp.go b/vendor/cloud.google.com/go/internal/testutil/cmp.go new file mode 100644 index 000000000000..9dd3d9111e4f --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/cmp.go @@ -0,0 +1,61 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "math" + "math/big" + + "github.com/golang/protobuf/proto" + "github.com/google/go-cmp/cmp" +) + +var ( + alwaysEqual = cmp.Comparer(func(_, _ interface{}) bool { return true }) + + defaultCmpOptions = []cmp.Option{ + // Use proto.Equal for protobufs + cmp.Comparer(proto.Equal), + // Use big.Rat.Cmp for big.Rats + cmp.Comparer(func(x, y *big.Rat) bool { + if x == nil || y == nil { + return x == y + } + return x.Cmp(y) == 0 + }), + // NaNs compare equal + cmp.FilterValues(func(x, y float64) bool { + return math.IsNaN(x) && math.IsNaN(y) + }, alwaysEqual), + cmp.FilterValues(func(x, y float32) bool { + return math.IsNaN(float64(x)) && math.IsNaN(float64(y)) + }, alwaysEqual), + } +) + +// Equal tests two values for equality. +func Equal(x, y interface{}, opts ...cmp.Option) bool { + // Put default options at the end. Order doesn't matter. + opts = append(opts[:len(opts):len(opts)], defaultCmpOptions...) + return cmp.Equal(x, y, opts...) +} + +// Diff reports the differences between two values. +// Diff(x, y) == "" iff Equal(x, y). +func Diff(x, y interface{}, opts ...cmp.Option) string { + // Put default options at the end. Order doesn't matter. + opts = append(opts[:len(opts):len(opts)], defaultCmpOptions...) + return cmp.Diff(x, y, opts...) +} diff --git a/vendor/cloud.google.com/go/internal/testutil/context.go b/vendor/cloud.google.com/go/internal/testutil/context.go new file mode 100644 index 000000000000..edada10464b4 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/context.go @@ -0,0 +1,152 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package testutil contains helper functions for writing tests. +package testutil + +import ( + "context" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "golang.org/x/oauth2/jwt" +) + +const ( + envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID" + envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY" +) + +// ProjID returns the project ID to use in integration tests, or the empty +// string if none is configured. +func ProjID() string { + return os.Getenv(envProjID) +} + +// Credentials returns the credentials to use in integration tests, or nil if +// none is configured. It uses the standard environment variable for tests in +// this repo. +func Credentials(ctx context.Context, scopes ...string) *google.Credentials { + return CredentialsEnv(ctx, envPrivateKey, scopes...) +} + +// CredentialsEnv returns the credentials to use in integration tests, or nil +// if none is configured. If the environment variable is unset, CredentialsEnv +// will try to find 'Application Default Credentials'. Else, CredentialsEnv +// will return nil. CredentialsEnv will log.Fatal if the token source is +// specified but missing or invalid. 
+func CredentialsEnv(ctx context.Context, envVar string, scopes ...string) *google.Credentials { + key := os.Getenv(envVar) + if key == "" { // Try for application default credentials. + creds, err := google.FindDefaultCredentials(ctx, scopes...) + if err != nil { + log.Println("No 'Application Default Credentials' found.") + return nil + } + return creds + } + + data, err := ioutil.ReadFile(key) + if err != nil { + log.Fatal(err) + } + + creds, err := google.CredentialsFromJSON(ctx, data, scopes...) + if err != nil { + log.Fatal(err) + } + return creds +} + +// TokenSource returns the OAuth2 token source to use in integration tests, +// or nil if none is configured. It uses the standard environment variable +// for tests in this repo. +func TokenSource(ctx context.Context, scopes ...string) oauth2.TokenSource { + return TokenSourceEnv(ctx, envPrivateKey, scopes...) +} + +// TokenSourceEnv returns the OAuth2 token source to use in integration tests. or nil +// if none is configured. It tries to get credentials from the filename in the +// environment variable envVar. If the environment variable is unset, TokenSourceEnv +// will try to find 'Application Default Credentials'. Else, TokenSourceEnv will +// return nil. TokenSourceEnv will log.Fatal if the token source is specified but +// missing or invalid. +func TokenSourceEnv(ctx context.Context, envVar string, scopes ...string) oauth2.TokenSource { + key := os.Getenv(envVar) + if key == "" { // Try for application default credentials. + ts, err := google.DefaultTokenSource(ctx, scopes...) + if err != nil { + log.Println("No 'Application Default Credentials' found.") + return nil + } + return ts + } + conf, err := jwtConfigFromFile(key, scopes) + if err != nil { + log.Fatal(err) + } + return conf.TokenSource(ctx) +} + +// JWTConfig reads the JSON private key file whose name is in the default +// environment variable, and returns the jwt.Config it contains. It ignores +// scopes. +// If the environment variable is empty, it returns (nil, nil). +func JWTConfig() (*jwt.Config, error) { + return jwtConfigFromFile(os.Getenv(envPrivateKey), nil) +} + +// jwtConfigFromFile reads the given JSON private key file, and returns the +// jwt.Config it contains. +// If the filename is empty, it returns (nil, nil). +func jwtConfigFromFile(filename string, scopes []string) (*jwt.Config, error) { + if filename == "" { + return nil, nil + } + jsonKey, err := ioutil.ReadFile(filename) + if err != nil { + return nil, fmt.Errorf("cannot read the JSON key file, err: %v", err) + } + conf, err := google.JWTConfigFromJSON(jsonKey, scopes...) + if err != nil { + return nil, fmt.Errorf("google.JWTConfigFromJSON: %v", err) + } + return conf, nil +} + +// CanReplay reports whether an integration test can be run in replay mode. +// The replay file must exist, and the GCLOUD_TESTS_GOLANG_ENABLE_REPLAY +// environment variable must be non-empty. +func CanReplay(replayFilename string) bool { + if os.Getenv("GCLOUD_TESTS_GOLANG_ENABLE_REPLAY") == "" { + return false + } + _, err := os.Stat(replayFilename) + return err == nil +} + +// ErroringTokenSource is a token source for testing purposes, +// to always return a non-nil error to its caller. It is useful +// when testing error responses with bad oauth2 credentials. +type ErroringTokenSource struct{} + +// Token implements oauth2.TokenSource, returning a nil oauth2.Token and a non-nil error. 
+func (fts ErroringTokenSource) Token() (*oauth2.Token, error) { + return nil, errors.New("intentional error") +} diff --git a/vendor/cloud.google.com/go/internal/testutil/headers_enforcer.go b/vendor/cloud.google.com/go/internal/testutil/headers_enforcer.go new file mode 100644 index 000000000000..d90b1c63d7c9 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/headers_enforcer.go @@ -0,0 +1,187 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "bytes" + "context" + "errors" + "fmt" + "log" + "os" + "strings" + + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +// HeaderChecker defines header checking and validation rules for any outgoing metadata. +type HeaderChecker struct { + // Key is the header name to be checked against e.g. "x-goog-api-client". + Key string + + // ValuesValidator validates the header values retrieved from mapping against + // Key in the Headers. + ValuesValidator func(values ...string) error +} + +// HeadersEnforcer asserts that outgoing RPC headers +// are present and match expectations. If the expected headers +// are not present or don't match expectations, it'll invoke OnFailure +// with the validation error, or instead log.Fatal if OnFailure is nil. +// +// It expects that every declared key will be present in the outgoing +// RPC header and each value will be validated by the validation function. +type HeadersEnforcer struct { + // Checkers maps header keys that are expected to be sent in the metadata + // of outgoing gRPC requests, against the values passed into the custom + // validation functions. + // + // If Checkers is nil or empty, only the default header "x-goog-api-client" + // will be checked for. + // Otherwise, if you supply Matchers, those keys and their respective + // validation functions will be checked. + Checkers []*HeaderChecker + + // OnFailure is the function that will be invoked after all validation + // failures have been composed. If OnFailure is nil, log.Fatal will be + // invoked instead. + OnFailure func(fmt_ string, args ...interface{}) +} + +// StreamInterceptors returns a list of StreamClientInterceptor functions which +// enforce the presence and validity of expected headers during streaming RPCs. +// +// For client implementations which provide their own StreamClientInterceptor(s) +// these interceptors should be specified as the final elements to +// WithChainStreamInterceptor. +// +// Alternatively, users may apply gPRC options produced from DialOptions to +// apply all applicable gRPC interceptors. +func (h *HeadersEnforcer) StreamInterceptors() []grpc.StreamClientInterceptor { + return []grpc.StreamClientInterceptor{h.interceptStream} +} + +// UnaryInterceptors returns a list of UnaryClientInterceptor functions which +// enforce the presence and validity of expected headers during unary RPCs. 
+// +// For client implementations which provide their own UnaryClientInterceptor(s) +// these interceptors should be specified as the final elements to +// WithChainUnaryInterceptor. +// +// Alternatively, users may apply gPRC options produced from DialOptions to +// apply all applicable gRPC interceptors. +func (h *HeadersEnforcer) UnaryInterceptors() []grpc.UnaryClientInterceptor { + return []grpc.UnaryClientInterceptor{h.interceptUnary} +} + +// DialOptions returns gRPC DialOptions consisting of unary and stream interceptors +// to enforce the presence and validity of expected headers. +func (h *HeadersEnforcer) DialOptions() []grpc.DialOption { + return []grpc.DialOption{ + grpc.WithChainStreamInterceptor(h.interceptStream), + grpc.WithChainUnaryInterceptor(h.interceptUnary), + } +} + +// CallOptions returns ClientOptions consisting of unary and stream interceptors +// to enforce the presence and validity of expected headers. +func (h *HeadersEnforcer) CallOptions() (copts []option.ClientOption) { + dopts := h.DialOptions() + for _, dopt := range dopts { + copts = append(copts, option.WithGRPCDialOption(dopt)) + } + return +} + +func (h *HeadersEnforcer) interceptUnary(ctx context.Context, method string, req, res interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + h.checkMetadata(ctx, method) + return invoker(ctx, method, req, res, cc, opts...) +} + +func (h *HeadersEnforcer) interceptStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) { + h.checkMetadata(ctx, method) + return streamer(ctx, desc, cc, method, opts...) +} + +// XGoogClientHeaderChecker is a HeaderChecker that ensures that the "x-goog-api-client" +// header is present on outgoing metadata. +var XGoogClientHeaderChecker = &HeaderChecker{ + Key: "x-goog-api-client", + ValuesValidator: func(values ...string) error { + if len(values) == 0 { + return errors.New("expecting values") + } + for _, value := range values { + switch { + case strings.Contains(value, "gl-go/"): + // TODO: check for exact version strings. + return nil + + default: // Add others here. + } + } + return errors.New("unmatched values") + }, +} + +// DefaultHeadersEnforcer returns a HeadersEnforcer that at bare minimum checks that +// the "x-goog-api-client" key is present in the outgoing metadata headers. On any +// validation failure, it will invoke log.Fatalf with the error message. +func DefaultHeadersEnforcer() *HeadersEnforcer { + return &HeadersEnforcer{ + Checkers: []*HeaderChecker{XGoogClientHeaderChecker}, + } +} + +func (h *HeadersEnforcer) checkMetadata(ctx context.Context, method string) { + onFailure := h.OnFailure + if onFailure == nil { + lgr := log.New(os.Stderr, "", 0) // Do not log the time prefix, it is noisy in test failure logs. + onFailure = func(fmt_ string, args ...interface{}) { + lgr.Fatalf(fmt_, args...) + } + } + + md, ok := metadata.FromOutgoingContext(ctx) + if !ok { + onFailure("Missing metadata for method %q", method) + return + } + checkers := h.Checkers + if len(checkers) == 0 { + // Instead use the default HeaderChecker. 
+ checkers = append(checkers, XGoogClientHeaderChecker) + } + + errBuf := new(bytes.Buffer) + for _, checker := range checkers { + hdrKey := checker.Key + outHdrValues, ok := md[hdrKey] + if !ok { + fmt.Fprintf(errBuf, "missing header %q\n", hdrKey) + continue + } + if err := checker.ValuesValidator(outHdrValues...); err != nil { + fmt.Fprintf(errBuf, "header %q: %v\n", hdrKey, err) + } + } + + if errBuf.Len() != 0 { + onFailure("For method %q, errors:\n%s", method, errBuf) + return + } +} diff --git a/vendor/cloud.google.com/go/internal/testutil/rand.go b/vendor/cloud.google.com/go/internal/testutil/rand.go new file mode 100644 index 000000000000..57d6c1403f51 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/rand.go @@ -0,0 +1,44 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package testutil + +import ( + "math/rand" + "sync" + "time" +) + +// NewRand creates a new *rand.Rand seeded with t. The return value is safe for use +// with multiple goroutines. +func NewRand(t time.Time) *rand.Rand { + s := &lockedSource{src: rand.NewSource(t.UnixNano())} + return rand.New(s) +} + +// lockedSource makes a rand.Source safe for use by multiple goroutines. +type lockedSource struct { + mu sync.Mutex + src rand.Source +} + +func (ls *lockedSource) Int63() int64 { + ls.mu.Lock() + defer ls.mu.Unlock() + return ls.src.Int63() +} + +func (ls *lockedSource) Seed(int64) { + panic("shouldn't be calling Seed") +} diff --git a/vendor/cloud.google.com/go/internal/testutil/server.go b/vendor/cloud.google.com/go/internal/testutil/server.go new file mode 100644 index 000000000000..fd9be5cdc289 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/server.go @@ -0,0 +1,135 @@ +/* +Copyright 2016 Google LLC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package testutil + +import ( + "fmt" + "log" + "net" + "regexp" + "strconv" + + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// A Server is an in-process gRPC server, listening on a system-chosen port on +// the local loopback interface. Servers are for testing only and are not +// intended to be used in production code. +// +// To create a server, make a new Server, register your handlers, then call +// Start: +// +// srv, err := NewServer() +// ... +// mypb.RegisterMyServiceServer(srv.Gsrv, &myHandler) +// .... +// srv.Start() +// +// Clients should connect to the server with no security: +// +// conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure()) +// ... 
+type Server struct { + Addr string + Port int + l net.Listener + Gsrv *grpc.Server +} + +// NewServer creates a new Server. The Server will be listening for gRPC connections +// at the address named by the Addr field, without TLS. +func NewServer(opts ...grpc.ServerOption) (*Server, error) { + return NewServerWithPort(0, opts...) +} + +// NewServerWithPort creates a new Server at a specific port. The Server will be listening +// for gRPC connections at the address named by the Addr field, without TLS. +func NewServerWithPort(port int, opts ...grpc.ServerOption) (*Server, error) { + l, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", port)) + if err != nil { + return nil, err + } + s := &Server{ + Addr: l.Addr().String(), + Port: parsePort(l.Addr().String()), + l: l, + Gsrv: grpc.NewServer(opts...), + } + return s, nil +} + +// Start causes the server to start accepting incoming connections. +// Call Start after registering handlers. +func (s *Server) Start() { + go func() { + if err := s.Gsrv.Serve(s.l); err != nil { + log.Printf("testutil.Server.Start: %v", err) + } + }() +} + +// Close shuts down the server. +func (s *Server) Close() { + s.Gsrv.Stop() + s.l.Close() +} + +// PageBounds converts an incoming page size and token from an RPC request into +// slice bounds and the outgoing next-page token. +// +// PageBounds assumes that the complete, unpaginated list of items exists as a +// single slice. In addition to the page size and token, PageBounds needs the +// length of that slice. +// +// PageBounds's first two return values should be used to construct a sub-slice of +// the complete, unpaginated slice. E.g. if the complete slice is s, then +// s[from:to] is the desired page. Its third return value should be set as the +// NextPageToken field of the RPC response. +func PageBounds(pageSize int, pageToken string, length int) (from, to int, nextPageToken string, err error) { + from, to = 0, length + if pageToken != "" { + from, err = strconv.Atoi(pageToken) + if err != nil { + return 0, 0, "", status.Errorf(codes.InvalidArgument, "bad page token: %v", err) + } + if from >= length { + return length, length, "", nil + } + } + if pageSize > 0 && from+pageSize < length { + to = from + pageSize + nextPageToken = strconv.Itoa(to) + } + return from, to, nextPageToken, nil +} + +var portParser = regexp.MustCompile(`:[0-9]+`) + +func parsePort(addr string) int { + res := portParser.FindAllString(addr, -1) + if len(res) == 0 { + panic(fmt.Errorf("parsePort: found no numbers in %s", addr)) + } + stringPort := res[0][1:] // strip the : + p, err := strconv.ParseInt(stringPort, 10, 32) + if err != nil { + panic(err) + } + return int(p) +} diff --git a/vendor/cloud.google.com/go/internal/testutil/trace.go b/vendor/cloud.google.com/go/internal/testutil/trace.go new file mode 100644 index 000000000000..5cf28f894146 --- /dev/null +++ b/vendor/cloud.google.com/go/internal/testutil/trace.go @@ -0,0 +1,78 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package testutil + +import ( + "log" + "sync" + "time" + + "go.opencensus.io/plugin/ocgrpc" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" +) + +// TestExporter is a test utility exporter. It should be created with NewtestExporter. +type TestExporter struct { + mu sync.Mutex + Spans []*trace.SpanData + + Stats chan *view.Data + Views []*view.View +} + +// NewTestExporter creates a TestExporter and registers it with OpenCensus. +func NewTestExporter(views ...*view.View) *TestExporter { + if len(views) == 0 { + views = ocgrpc.DefaultClientViews + } + te := &TestExporter{Stats: make(chan *view.Data), Views: views} + + view.RegisterExporter(te) + view.SetReportingPeriod(time.Millisecond) + if err := view.Register(views...); err != nil { + log.Fatal(err) + } + + trace.RegisterExporter(te) + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + + return te +} + +// ExportSpan exports a span. +func (te *TestExporter) ExportSpan(s *trace.SpanData) { + te.mu.Lock() + defer te.mu.Unlock() + te.Spans = append(te.Spans, s) +} + +// ExportView exports a view. +func (te *TestExporter) ExportView(vd *view.Data) { + if len(vd.Rows) > 0 { + select { + case te.Stats <- vd: + default: + } + } +} + +// Unregister unregisters the exporter from OpenCensus. +func (te *TestExporter) Unregister() { + view.Unregister(te.Views...) + view.UnregisterExporter(te) + trace.UnregisterExporter(te) + view.SetReportingPeriod(0) // reset to default value +} diff --git a/vendor/cloud.google.com/go/pubsub/CHANGES.md b/vendor/cloud.google.com/go/pubsub/CHANGES.md new file mode 100644 index 000000000000..49de654c5bb4 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/CHANGES.md @@ -0,0 +1,36 @@ +# Changes + +## v1.3.1 + +- Fix bug with removing dead letter policy from a subscription +- Set default value of MaxExtensionPeriod to 0, which is functionally equivalent + +## v1.3.0 + +- Update cloud.google.com/go to v0.54.0 + +## v1.2.0 + +- Add support for upcoming dead letter topics feature +- Expose Subscription.ReceiveSettings.MaxExtensionPeriod setting +- Standardize default settings with other client libraries + - Increase publish delay threshold from 1ms to 10ms + - Increase subscription MaxExtension from 10m to 60m +- Always send keepalive/heartbeat ping on StreamingPull streams to minimize + stream reopen requests + +## v1.1.0 + +- Limit default grpc connections to 4. +- Fix issues with OpenCensus metric for pull count not including synchronous pull messages. +- Fix issue with publish bundle size calculations. +- Add ClearMessages method to pstest server. + +## v1.0.1 + +Small fix to a package name. + +## v1.0.0 + +This is the first tag to carve out pubsub as its own module. See: +https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. diff --git a/vendor/cloud.google.com/go/pubsub/LICENSE b/vendor/cloud.google.com/go/pubsub/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/cloud.google.com/go/pubsub/README.md b/vendor/cloud.google.com/go/pubsub/README.md new file mode 100644 index 000000000000..59f4cf66d9d2 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/README.md @@ -0,0 +1,46 @@ +## Cloud Pub/Sub [![GoDoc](https://godoc.org/cloud.google.com/go/pubsub?status.svg)](https://godoc.org/cloud.google.com/go/pubsub) + +- [About Cloud Pubsub](https://cloud.google.com/pubsub/) +- [API documentation](https://cloud.google.com/pubsub/docs) +- [Go client documentation](https://godoc.org/cloud.google.com/go/pubsub) +- [Complete sample programs](https://github.com/GoogleCloudPlatform/golang-samples/tree/master/pubsub) + +### Example Usage + +First create a `pubsub.Client` to use throughout your application: + +[snip]:# (pubsub-1) +```go +client, err := pubsub.NewClient(ctx, "project-id") +if err != nil { + log.Fatal(err) +} +``` + +Then use the client to publish and subscribe: + +[snip]:# (pubsub-2) +```go +// Publish "hello world" on topic1. +topic := client.Topic("topic1") +res := topic.Publish(ctx, &pubsub.Message{ + Data: []byte("hello world"), +}) +// The publish happens asynchronously. +// Later, you can get the result from res: +... +msgID, err := res.Get(ctx) +if err != nil { + log.Fatal(err) +} + +// Use a callback to receive messages via subscription1. +sub := client.Subscription("subscription1") +err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) { + fmt.Println(m.Data) + m.Ack() // Acknowledge that we've consumed the message. +}) +if err != nil { + log.Println(err) +} +``` \ No newline at end of file diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/doc.go b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go new file mode 100644 index 000000000000..378e9b72cc81 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/doc.go @@ -0,0 +1,100 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +// Package pubsub is an auto-generated package for the +// Cloud Pub/Sub API. +// +// Provides reliable, many-to-many, asynchronous messaging between +// applications. +// +// Use of Context +// +// The ctx passed to NewClient is used for authentication requests and +// for creating the underlying connection, but is not used for subsequent calls. +// Individual methods on the client use the ctx given to them. +// +// To close the open connection, use the Close() method. +// +// For information about setting deadlines, reusing contexts, and more +// please visit godoc.org/cloud.google.com/go. +package pubsub // import "cloud.google.com/go/pubsub/apiv1" + +import ( + "context" + "runtime" + "strings" + "unicode" + + "google.golang.org/grpc/metadata" +) + +const versionClient = "20200312" + +func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { + out, _ := metadata.FromOutgoingContext(ctx) + out = out.Copy() + for _, md := range mds { + for k, v := range md { + out[k] = append(out[k], v...) 
+ } + } + return metadata.NewOutgoingContext(ctx, out) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/pubsub", + } +} + +// versionGo returns the Go runtime version. The returned string +// has no whitespace, suitable for reporting in header. +func versionGo() string { + const develPrefix = "devel +" + + s := runtime.Version() + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + notSemverRune := func(r rune) bool { + return !strings.ContainsRune("0123456789.", r) + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "UNKNOWN" +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/iam.go b/vendor/cloud.google.com/go/pubsub/apiv1/iam.go new file mode 100644 index 000000000000..4a0c231d7d24 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/iam.go @@ -0,0 +1,36 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "cloud.google.com/go/iam" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +func (c *PublisherClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), subscription.Name) +} + +func (c *PublisherClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), topic.Name) +} + +func (c *SubscriberClient) SubscriptionIAM(subscription *pubsubpb.Subscription) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), subscription.Name) +} + +func (c *SubscriberClient) TopicIAM(topic *pubsubpb.Topic) *iam.Handle { + return iam.InternalNewHandle(c.Connection(), topic.Name) +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go b/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go new file mode 100644 index 000000000000..b9ab4848db1b --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/path_funcs.go @@ -0,0 +1,95 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +// PublisherProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func PublisherProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// PublisherTopicPath returns the path for the topic resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/topics/%s", project, topic) +// instead. +func PublisherTopicPath(project, topic string) string { + return "" + + "projects/" + + project + + "/topics/" + + topic + + "" +} + +// SubscriberProjectPath returns the path for the project resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s", project) +// instead. +func SubscriberProjectPath(project string) string { + return "" + + "projects/" + + project + + "" +} + +// SubscriberSnapshotPath returns the path for the snapshot resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/snapshots/%s", project, snapshot) +// instead. +func SubscriberSnapshotPath(project, snapshot string) string { + return "" + + "projects/" + + project + + "/snapshots/" + + snapshot + + "" +} + +// SubscriberSubscriptionPath returns the path for the subscription resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/subscriptions/%s", project, subscription) +// instead. +func SubscriberSubscriptionPath(project, subscription string) string { + return "" + + "projects/" + + project + + "/subscriptions/" + + subscription + + "" +} + +// SubscriberTopicPath returns the path for the topic resource. +// +// Deprecated: Use +// fmt.Sprintf("projects/%s/topics/%s", project, topic) +// instead. +func SubscriberTopicPath(project, topic string) string { + return "" + + "projects/" + + project + + "/topics/" + + topic + + "" +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go new file mode 100644 index 000000000000..96c392f826bc --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/publisher_client.go @@ -0,0 +1,534 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package pubsub + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// PublisherCallOptions contains the retry settings for each method of PublisherClient. 
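+//
+// These defaults can be overridden on a per-call basis by passing additional
+// gax.CallOption values to a method; for instance, an illustrative sketch:
+//
+//   topic, err := c.GetTopic(ctx, req, gax.WithRetry(func() gax.Retryer {
+//       return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
+//           Initial: 50 * time.Millisecond,
+//           Max:     5 * time.Second,
+//       })
+//   }))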
+type PublisherCallOptions struct { + CreateTopic []gax.CallOption + UpdateTopic []gax.CallOption + Publish []gax.CallOption + GetTopic []gax.CallOption + ListTopics []gax.CallOption + ListTopicSubscriptions []gax.CallOption + ListTopicSnapshots []gax.CallOption + DeleteTopic []gax.CallOption +} + +func defaultPublisherClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("pubsub.googleapis.com:443"), + option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), + option.WithScopes(DefaultAuthScopes()...), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultPublisherCallOptions() *PublisherCallOptions { + return &PublisherCallOptions{ + CreateTopic: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateTopic: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + Publish: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Aborted, + codes.Canceled, + codes.Internal, + codes.ResourceExhausted, + codes.Unknown, + codes.Unavailable, + codes.DeadlineExceeded, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetTopic: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unknown, + codes.Aborted, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListTopics: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unknown, + codes.Aborted, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListTopicSubscriptions: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unknown, + codes.Aborted, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListTopicSnapshots: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unknown, + codes.Aborted, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + DeleteTopic: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// PublisherClient is a client for interacting with Cloud Pub/Sub API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type PublisherClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // The gRPC API client. + publisherClient pubsubpb.PublisherClient + + // The call options for this service. + CallOptions *PublisherCallOptions + + // The x-goog-* metadata to be sent with each request. 
+ xGoogMetadata metadata.MD +} + +// NewPublisherClient creates a new publisher client. +// +// The service that an application uses to manipulate topics, and to send +// messages to a topic. +func NewPublisherClient(ctx context.Context, opts ...option.ClientOption) (*PublisherClient, error) { + connPool, err := gtransport.DialPool(ctx, append(defaultPublisherClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &PublisherClient{ + connPool: connPool, + CallOptions: defaultPublisherCallOptions(), + + publisherClient: pubsubpb.NewPublisherClient(connPool), + } + c.SetGoogleClientInfo() + + return c, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated. +func (c *PublisherClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. +func (c *PublisherClient) Close() error { + return c.connPool.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *PublisherClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// CreateTopic creates the given topic with the given name. See the +// +// resource name rules (at https://cloud.google.com/pubsub/docs/admin#resource_names). +func (c *PublisherClient) CreateTopic(ctx context.Context, req *pubsubpb.Topic, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateTopic[0:len(c.CallOptions.CreateTopic):len(c.CallOptions.CreateTopic)], opts...) + var resp *pubsubpb.Topic + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.CreateTopic(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateTopic updates an existing topic. Note that certain properties of a +// topic are not modifiable. +func (c *PublisherClient) UpdateTopic(ctx context.Context, req *pubsubpb.UpdateTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic.name", url.QueryEscape(req.GetTopic().GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateTopic[0:len(c.CallOptions.UpdateTopic):len(c.CallOptions.UpdateTopic)], opts...) + var resp *pubsubpb.Topic + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.UpdateTopic(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// Publish adds one or more messages to the topic. Returns NOT_FOUND if the topic +// does not exist. 
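+//
+// An illustrative sketch of a call (the topic name below is a placeholder):
+//
+//   res, err := c.Publish(ctx, &pubsubpb.PublishRequest{
+//       Topic:    "projects/my-project/topics/my-topic",
+//       Messages: []*pubsubpb.PubsubMessage{{Data: []byte("hello")}},
+//   })
+//   if err != nil {
+//       // handle the error
+//   }
+//   _ = res.MessageIds // server-assigned IDs, in the same order as the messages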
+func (c *PublisherClient) Publish(ctx context.Context, req *pubsubpb.PublishRequest, opts ...gax.CallOption) (*pubsubpb.PublishResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.Publish[0:len(c.CallOptions.Publish):len(c.CallOptions.Publish)], opts...) + var resp *pubsubpb.PublishResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.Publish(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetTopic gets the configuration of a topic. +func (c *PublisherClient) GetTopic(ctx context.Context, req *pubsubpb.GetTopicRequest, opts ...gax.CallOption) (*pubsubpb.Topic, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetTopic[0:len(c.CallOptions.GetTopic):len(c.CallOptions.GetTopic)], opts...) + var resp *pubsubpb.Topic + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.GetTopic(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListTopics lists matching topics. +func (c *PublisherClient) ListTopics(ctx context.Context, req *pubsubpb.ListTopicsRequest, opts ...gax.CallOption) *TopicIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListTopics[0:len(c.CallOptions.ListTopics):len(c.CallOptions.ListTopics)], opts...) + it := &TopicIterator{} + req = proto.Clone(req).(*pubsubpb.ListTopicsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Topic, string, error) { + var resp *pubsubpb.ListTopicsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.ListTopics(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.Topics, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// ListTopicSubscriptions lists the names of the subscriptions on this topic. +func (c *PublisherClient) ListTopicSubscriptions(ctx context.Context, req *pubsubpb.ListTopicSubscriptionsRequest, opts ...gax.CallOption) *StringIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListTopicSubscriptions[0:len(c.CallOptions.ListTopicSubscriptions):len(c.CallOptions.ListTopicSubscriptions)], opts...) 
+ it := &StringIterator{} + req = proto.Clone(req).(*pubsubpb.ListTopicSubscriptionsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { + var resp *pubsubpb.ListTopicSubscriptionsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.ListTopicSubscriptions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.Subscriptions, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// ListTopicSnapshots lists the names of the snapshots on this topic. Snapshots are used in +// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow +// you to manage message acknowledgments in bulk. That is, you can set the +// acknowledgment state of messages in an existing subscription to the state +// captured by a snapshot. +func (c *PublisherClient) ListTopicSnapshots(ctx context.Context, req *pubsubpb.ListTopicSnapshotsRequest, opts ...gax.CallOption) *StringIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListTopicSnapshots[0:len(c.CallOptions.ListTopicSnapshots):len(c.CallOptions.ListTopicSnapshots)], opts...) + it := &StringIterator{} + req = proto.Clone(req).(*pubsubpb.ListTopicSnapshotsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { + var resp *pubsubpb.ListTopicSnapshotsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.publisherClient.ListTopicSnapshots(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.Snapshots, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// DeleteTopic deletes the topic with the given name. Returns NOT_FOUND if the topic +// does not exist. After a topic is deleted, a new topic may be created with +// the same name; this is an entirely new topic with none of the old +// configuration or subscriptions. Existing subscriptions to this topic are +// not deleted, but their topic field is set to _deleted-topic_. 
+func (c *PublisherClient) DeleteTopic(ctx context.Context, req *pubsubpb.DeleteTopicRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "topic", url.QueryEscape(req.GetTopic()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DeleteTopic[0:len(c.CallOptions.DeleteTopic):len(c.CallOptions.DeleteTopic)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.publisherClient.DeleteTopic(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// StringIterator manages a stream of string. +type StringIterator struct { + items []string + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *StringIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *StringIterator) Next() (string, error) { + var item string + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *StringIterator) bufLen() int { + return len(it.items) +} + +func (it *StringIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// TopicIterator manages a stream of *pubsubpb.Topic. +type TopicIterator struct { + items []*pubsubpb.Topic + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Topic, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TopicIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
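+//
+// A typical drain loop looks like the following sketch; error handling beyond
+// iterator.Done is left to the caller:
+//
+//   for {
+//       topic, err := it.Next()
+//       if err == iterator.Done {
+//           break
+//       }
+//       if err != nil {
+//           break // or handle the error
+//       }
+//       _ = topic // use the topic
+//   }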
+func (it *TopicIterator) Next() (*pubsubpb.Topic, error) { + var item *pubsubpb.Topic + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *TopicIterator) bufLen() int { + return len(it.items) +} + +func (it *TopicIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go new file mode 100644 index 000000000000..5bbf8908059b --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/apiv1/subscriber_client.go @@ -0,0 +1,808 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go_gapic. DO NOT EDIT. + +package pubsub + +import ( + "context" + "fmt" + "math" + "net/url" + "time" + + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + gtransport "google.golang.org/api/transport/grpc" + pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" +) + +// SubscriberCallOptions contains the retry settings for each method of SubscriberClient. 
+type SubscriberCallOptions struct { + CreateSubscription []gax.CallOption + GetSubscription []gax.CallOption + UpdateSubscription []gax.CallOption + ListSubscriptions []gax.CallOption + DeleteSubscription []gax.CallOption + ModifyAckDeadline []gax.CallOption + Acknowledge []gax.CallOption + Pull []gax.CallOption + StreamingPull []gax.CallOption + ModifyPushConfig []gax.CallOption + GetSnapshot []gax.CallOption + ListSnapshots []gax.CallOption + CreateSnapshot []gax.CallOption + UpdateSnapshot []gax.CallOption + DeleteSnapshot []gax.CallOption + Seek []gax.CallOption +} + +func defaultSubscriberClientOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithEndpoint("pubsub.googleapis.com:443"), + option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), + option.WithScopes(DefaultAuthScopes()...), + option.WithGRPCDialOption(grpc.WithDefaultCallOptions( + grpc.MaxCallRecvMsgSize(math.MaxInt32))), + } +} + +func defaultSubscriberCallOptions() *SubscriberCallOptions { + return &SubscriberCallOptions{ + CreateSubscription: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unknown, + codes.Aborted, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetSubscription: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unknown, + codes.Aborted, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateSubscription: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListSubscriptions: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unknown, + codes.Aborted, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + DeleteSubscription: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ModifyAckDeadline: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + Acknowledge: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + Pull: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unknown, + codes.Aborted, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + StreamingPull: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.DeadlineExceeded, + codes.ResourceExhausted, + codes.Aborted, + codes.Internal, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ModifyPushConfig: 
[]gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + GetSnapshot: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unknown, + codes.Aborted, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + ListSnapshots: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unknown, + codes.Aborted, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + CreateSnapshot: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + UpdateSnapshot: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + DeleteSnapshot: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + Seek: []gax.CallOption{ + gax.WithRetry(func() gax.Retryer { + return gax.OnCodes([]codes.Code{ + codes.Unknown, + codes.Aborted, + codes.Unavailable, + }, gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: 60000 * time.Millisecond, + Multiplier: 1.30, + }) + }), + }, + } +} + +// SubscriberClient is a client for interacting with Cloud Pub/Sub API. +// +// Methods, except Close, may be called concurrently. However, fields must not be modified concurrently with method calls. +type SubscriberClient struct { + // Connection pool of gRPC connections to the service. + connPool gtransport.ConnPool + + // The gRPC API client. + subscriberClient pubsubpb.SubscriberClient + + // The call options for this service. + CallOptions *SubscriberCallOptions + + // The x-goog-* metadata to be sent with each request. + xGoogMetadata metadata.MD +} + +// NewSubscriberClient creates a new subscriber client. +// +// The service that an application uses to manipulate subscriptions and to +// consume messages from a subscription via the Pull method or by +// establishing a bi-directional stream using the StreamingPull method. +func NewSubscriberClient(ctx context.Context, opts ...option.ClientOption) (*SubscriberClient, error) { + connPool, err := gtransport.DialPool(ctx, append(defaultSubscriberClientOptions(), opts...)...) + if err != nil { + return nil, err + } + c := &SubscriberClient{ + connPool: connPool, + CallOptions: defaultSubscriberCallOptions(), + + subscriberClient: pubsubpb.NewSubscriberClient(connPool), + } + c.SetGoogleClientInfo() + + return c, nil +} + +// Connection returns a connection to the API service. +// +// Deprecated. +func (c *SubscriberClient) Connection() *grpc.ClientConn { + return c.connPool.Conn() +} + +// Close closes the connection to the API service. The user should invoke this when +// the client is no longer required. 
+func (c *SubscriberClient) Close() error { + return c.connPool.Close() +} + +// SetGoogleClientInfo sets the name and version of the application in +// the `x-goog-api-client` header passed on each request. Intended for +// use by Google-written clients. +func (c *SubscriberClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", versionGo()}, keyval...) + kv = append(kv, "gapic", versionClient, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...)) +} + +// CreateSubscription creates a subscription to a given topic. See the +// +// resource name rules (at https://cloud.google.com/pubsub/docs/admin#resource_names). +// If the subscription already exists, returns ALREADY_EXISTS. +// If the corresponding topic doesn’t exist, returns NOT_FOUND. +// +// If the name is not provided in the request, the server will assign a random +// name for this subscription on the same project as the topic, conforming +// to the +// resource name +// format (at https://cloud.google.com/pubsub/docs/admin#resource_names). The +// generated name is populated in the returned Subscription object. Note that +// for REST API requests, you must specify a name in the request. +func (c *SubscriberClient) CreateSubscription(ctx context.Context, req *pubsubpb.Subscription, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateSubscription[0:len(c.CallOptions.CreateSubscription):len(c.CallOptions.CreateSubscription)], opts...) + var resp *pubsubpb.Subscription + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.CreateSubscription(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// GetSubscription gets the configuration details of a subscription. +func (c *SubscriberClient) GetSubscription(ctx context.Context, req *pubsubpb.GetSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetSubscription[0:len(c.CallOptions.GetSubscription):len(c.CallOptions.GetSubscription)], opts...) + var resp *pubsubpb.Subscription + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.GetSubscription(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateSubscription updates an existing subscription. Note that certain properties of a +// subscription, such as its topic, are not modifiable. +func (c *SubscriberClient) UpdateSubscription(ctx context.Context, req *pubsubpb.UpdateSubscriptionRequest, opts ...gax.CallOption) (*pubsubpb.Subscription, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription.name", url.QueryEscape(req.GetSubscription().GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateSubscription[0:len(c.CallOptions.UpdateSubscription):len(c.CallOptions.UpdateSubscription)], opts...) 
+ var resp *pubsubpb.Subscription + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.UpdateSubscription(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListSubscriptions lists matching subscriptions. +func (c *SubscriberClient) ListSubscriptions(ctx context.Context, req *pubsubpb.ListSubscriptionsRequest, opts ...gax.CallOption) *SubscriptionIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListSubscriptions[0:len(c.CallOptions.ListSubscriptions):len(c.CallOptions.ListSubscriptions)], opts...) + it := &SubscriptionIterator{} + req = proto.Clone(req).(*pubsubpb.ListSubscriptionsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Subscription, string, error) { + var resp *pubsubpb.ListSubscriptionsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.ListSubscriptions(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.Subscriptions, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// DeleteSubscription deletes an existing subscription. All messages retained in the subscription +// are immediately dropped. Calls to Pull after deletion will return +// NOT_FOUND. After a subscription is deleted, a new one may be created with +// the same name, but the new one has no association with the old +// subscription or its topic unless the same topic is specified. +func (c *SubscriberClient) DeleteSubscription(ctx context.Context, req *pubsubpb.DeleteSubscriptionRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DeleteSubscription[0:len(c.CallOptions.DeleteSubscription):len(c.CallOptions.DeleteSubscription)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.subscriberClient.DeleteSubscription(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// ModifyAckDeadline modifies the ack deadline for a specific message. This method is useful +// to indicate that more time is needed to process a message by the +// subscriber, or to make the message available for redelivery if the +// processing was interrupted. Note that this does not modify the +// subscription-level ackDeadlineSeconds used for subsequent messages. 
+func (c *SubscriberClient) ModifyAckDeadline(ctx context.Context, req *pubsubpb.ModifyAckDeadlineRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ModifyAckDeadline[0:len(c.CallOptions.ModifyAckDeadline):len(c.CallOptions.ModifyAckDeadline)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.subscriberClient.ModifyAckDeadline(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// Acknowledge acknowledges the messages associated with the ack_ids in the +// AcknowledgeRequest. The Pub/Sub system can remove the relevant messages +// from the subscription. +// +// Acknowledging a message whose ack deadline has expired may succeed, +// but such a message may be redelivered later. Acknowledging a message more +// than once will not result in an error. +func (c *SubscriberClient) Acknowledge(ctx context.Context, req *pubsubpb.AcknowledgeRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.Acknowledge[0:len(c.CallOptions.Acknowledge):len(c.CallOptions.Acknowledge)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.subscriberClient.Acknowledge(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// Pull pulls messages from the server. The server may return UNAVAILABLE if +// there are too many concurrent pull requests pending for the given +// subscription. +func (c *SubscriberClient) Pull(ctx context.Context, req *pubsubpb.PullRequest, opts ...gax.CallOption) (*pubsubpb.PullResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.Pull[0:len(c.CallOptions.Pull):len(c.CallOptions.Pull)], opts...) + var resp *pubsubpb.PullResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.Pull(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// StreamingPull establishes a stream with the server, which sends messages down to the +// client. The client streams acknowledgements and ack deadline modifications +// back to the server. The server will close the stream and return the status +// on any error. The server may close the stream with status UNAVAILABLE to +// reassign server-side resources, in which case, the client should +// re-establish the stream. Flow control can be achieved by configuring the +// underlying RPC channel. +func (c *SubscriberClient) StreamingPull(ctx context.Context, opts ...gax.CallOption) (pubsubpb.Subscriber_StreamingPullClient, error) { + ctx = insertMetadata(ctx, c.xGoogMetadata) + opts = append(c.CallOptions.StreamingPull[0:len(c.CallOptions.StreamingPull):len(c.CallOptions.StreamingPull)], opts...) 
+ var resp pubsubpb.Subscriber_StreamingPullClient + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.StreamingPull(ctx, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ModifyPushConfig modifies the PushConfig for a specified subscription. +// +// This may be used to change a push subscription to a pull one (signified by +// an empty PushConfig) or vice versa, or change the endpoint URL and other +// attributes of a push subscription. Messages will accumulate for delivery +// continuously through the call regardless of changes to the PushConfig. +func (c *SubscriberClient) ModifyPushConfig(ctx context.Context, req *pubsubpb.ModifyPushConfigRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ModifyPushConfig[0:len(c.CallOptions.ModifyPushConfig):len(c.CallOptions.ModifyPushConfig)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.subscriberClient.ModifyPushConfig(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// GetSnapshot gets the configuration details of a snapshot. Snapshots are used in +// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow you to manage message acknowledgments in bulk. That +// is, you can set the acknowledgment state of messages in an existing +// subscription to the state captured by a snapshot. +func (c *SubscriberClient) GetSnapshot(ctx context.Context, req *pubsubpb.GetSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snapshot", url.QueryEscape(req.GetSnapshot()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.GetSnapshot[0:len(c.CallOptions.GetSnapshot):len(c.CallOptions.GetSnapshot)], opts...) + var resp *pubsubpb.Snapshot + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.GetSnapshot(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// ListSnapshots lists the existing snapshots. Snapshots are used in +// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow +// you to manage message acknowledgments in bulk. That is, you can set the +// acknowledgment state of messages in an existing subscription to the state +// captured by a snapshot. +func (c *SubscriberClient) ListSnapshots(ctx context.Context, req *pubsubpb.ListSnapshotsRequest, opts ...gax.CallOption) *SnapshotIterator { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "project", url.QueryEscape(req.GetProject()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.ListSnapshots[0:len(c.CallOptions.ListSnapshots):len(c.CallOptions.ListSnapshots)], opts...) 
+ it := &SnapshotIterator{} + req = proto.Clone(req).(*pubsubpb.ListSnapshotsRequest) + it.InternalFetch = func(pageSize int, pageToken string) ([]*pubsubpb.Snapshot, string, error) { + var resp *pubsubpb.ListSnapshotsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.ListSnapshots(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + + it.Response = resp + return resp.Snapshots, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil + } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + it.pageInfo.MaxSize = int(req.PageSize) + it.pageInfo.Token = req.PageToken + return it +} + +// CreateSnapshot creates a snapshot from the requested subscription. Snapshots are used in +// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow +// you to manage message acknowledgments in bulk. That is, you can set the +// acknowledgment state of messages in an existing subscription to the state +// captured by a snapshot. +// +// +// If the snapshot already exists, returns ALREADY_EXISTS. +// If the requested subscription doesn’t exist, returns NOT_FOUND. +// If the backlog in the subscription is too old – and the resulting snapshot +// would expire in less than 1 hour – then FAILED_PRECONDITION is returned. +// See also the Snapshot.expire_time field. If the name is not provided in +// the request, the server will assign a random +// name for this snapshot on the same project as the subscription, conforming +// to the +// resource name +// format (at https://cloud.google.com/pubsub/docs/admin#resource_names). The +// generated name is populated in the returned Snapshot object. Note that for +// REST API requests, you must specify a name in the request. +func (c *SubscriberClient) CreateSnapshot(ctx context.Context, req *pubsubpb.CreateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "name", url.QueryEscape(req.GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.CreateSnapshot[0:len(c.CallOptions.CreateSnapshot):len(c.CallOptions.CreateSnapshot)], opts...) + var resp *pubsubpb.Snapshot + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.CreateSnapshot(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// UpdateSnapshot updates an existing snapshot. Snapshots are used in +// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow +// you to manage message acknowledgments in bulk. That is, you can set the +// acknowledgment state of messages in an existing subscription to the state +// captured by a snapshot. 
+func (c *SubscriberClient) UpdateSnapshot(ctx context.Context, req *pubsubpb.UpdateSnapshotRequest, opts ...gax.CallOption) (*pubsubpb.Snapshot, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snapshot.name", url.QueryEscape(req.GetSnapshot().GetName()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.UpdateSnapshot[0:len(c.CallOptions.UpdateSnapshot):len(c.CallOptions.UpdateSnapshot)], opts...) + var resp *pubsubpb.Snapshot + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.UpdateSnapshot(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// DeleteSnapshot removes an existing snapshot. Snapshots are used in +// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow +// you to manage message acknowledgments in bulk. That is, you can set the +// acknowledgment state of messages in an existing subscription to the state +// captured by a snapshot. +// +// +// When the snapshot is deleted, all messages retained in the snapshot +// are immediately dropped. After a snapshot is deleted, a new one may be +// created with the same name, but the new one has no association with the old +// snapshot or its subscription, unless the same subscription is specified. +func (c *SubscriberClient) DeleteSnapshot(ctx context.Context, req *pubsubpb.DeleteSnapshotRequest, opts ...gax.CallOption) error { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "snapshot", url.QueryEscape(req.GetSnapshot()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.DeleteSnapshot[0:len(c.CallOptions.DeleteSnapshot):len(c.CallOptions.DeleteSnapshot)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + _, err = c.subscriberClient.DeleteSnapshot(ctx, req, settings.GRPC...) + return err + }, opts...) + return err +} + +// Seek seeks an existing subscription to a point in time or to a given snapshot, +// whichever is provided in the request. Snapshots are used in +// Seek (at https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow +// you to manage message acknowledgments in bulk. That is, you can set the +// acknowledgment state of messages in an existing subscription to the state +// captured by a snapshot. Note that both the subscription and the snapshot +// must be on the same topic. +func (c *SubscriberClient) Seek(ctx context.Context, req *pubsubpb.SeekRequest, opts ...gax.CallOption) (*pubsubpb.SeekResponse, error) { + md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "subscription", url.QueryEscape(req.GetSubscription()))) + ctx = insertMetadata(ctx, c.xGoogMetadata, md) + opts = append(c.CallOptions.Seek[0:len(c.CallOptions.Seek):len(c.CallOptions.Seek)], opts...) + var resp *pubsubpb.SeekResponse + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.subscriberClient.Seek(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, err + } + return resp, nil +} + +// SnapshotIterator manages a stream of *pubsubpb.Snapshot. +type SnapshotIterator struct { + items []*pubsubpb.Snapshot + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. 
+ // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Snapshot, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SnapshotIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *SnapshotIterator) Next() (*pubsubpb.Snapshot, error) { + var item *pubsubpb.Snapshot + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SnapshotIterator) bufLen() int { + return len(it.items) +} + +func (it *SnapshotIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// SubscriptionIterator manages a stream of *pubsubpb.Subscription. +type SubscriptionIterator struct { + items []*pubsubpb.Subscription + pageInfo *iterator.PageInfo + nextFunc func() error + + // Response is the raw response for the current page. + // It must be cast to the RPC response type. + // Calling Next() or InternalFetch() updates this value. + Response interface{} + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*pubsubpb.Subscription, nextPageToken string, err error) +} + +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *SubscriptionIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo +} + +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *SubscriptionIterator) Next() (*pubsubpb.Subscription, error) { + var item *pubsubpb.Subscription + if err := it.nextFunc(); err != nil { + return item, err + } + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *SubscriptionIterator) bufLen() int { + return len(it.items) +} + +func (it *SubscriptionIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} diff --git a/vendor/cloud.google.com/go/pubsub/debug.go b/vendor/cloud.google.com/go/pubsub/debug.go new file mode 100644 index 000000000000..977ae577f7e7 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/debug.go @@ -0,0 +1,72 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build psdebug + +package pubsub + +import ( + "sync" + "time" +) + +var ( + dmu sync.Mutex + msgTraces = map[string][]Event{} + ackIDToMsgID = map[string]string{} +) + +type Event struct { + Desc string + At time.Time +} + +func MessageEvents(msgID string) []Event { + dmu.Lock() + defer dmu.Unlock() + return msgTraces[msgID] +} + +func addRecv(msgID, ackID string, t time.Time) { + dmu.Lock() + defer dmu.Unlock() + ackIDToMsgID[ackID] = msgID + addEvent(msgID, "recv", t) +} + +func addAcks(ackIDs []string) { + dmu.Lock() + defer dmu.Unlock() + now := time.Now() + for _, id := range ackIDs { + addEvent(ackIDToMsgID[id], "ack", now) + } +} + +func addModAcks(ackIDs []string, deadlineSecs int32) { + dmu.Lock() + defer dmu.Unlock() + desc := "modack" + if deadlineSecs == 0 { + desc = "nack" + } + now := time.Now() + for _, id := range ackIDs { + addEvent(ackIDToMsgID[id], desc, now) + } +} + +func addEvent(msgID, desc string, t time.Time) { + msgTraces[msgID] = append(msgTraces[msgID], Event{desc, t}) +} diff --git a/vendor/cloud.google.com/go/pubsub/doc.go b/vendor/cloud.google.com/go/pubsub/doc.go new file mode 100644 index 000000000000..a86fc3d4a594 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/doc.go @@ -0,0 +1,140 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package pubsub provides an easy way to publish and receive Google Cloud Pub/Sub +messages, hiding the details of the underlying server RPCs. Google Cloud +Pub/Sub is a many-to-many, asynchronous messaging system that decouples senders +and receivers. + +More information about Google Cloud Pub/Sub is available at +https://cloud.google.com/pubsub/docs + +See https://godoc.org/cloud.google.com/go for authentication, timeouts, +connection pooling and similar aspects of this package. + + +Publishing + +Google Cloud Pub/Sub messages are published to topics. Topics may be created +using the pubsub package like so: + + topic, err := pubsubClient.CreateTopic(context.Background(), "topic-name") + +Messages may then be published to a topic: + + res := topic.Publish(ctx, &pubsub.Message{Data: []byte("payload")}) + +Publish queues the message for publishing and returns immediately. When enough +messages have accumulated, or enough time has elapsed, the batch of messages is +sent to the Pub/Sub service. + +Publish returns a PublishResult, which behaves like a future: its Get method +blocks until the message has been sent to the service. + +The first time you call Publish on a topic, goroutines are started in the +background. 
To clean up these goroutines, call Stop: + + topic.Stop() + + +Receiving + +To receive messages published to a topic, clients create subscriptions +to the topic. There may be more than one subscription per topic; each message +that is published to the topic will be delivered to all of its subscriptions. + +Subscriptions may be created like so: + + sub, err := pubsubClient.CreateSubscription(context.Background(), "sub-name", + pubsub.SubscriptionConfig{Topic: topic}) + +Messages are then consumed from a subscription via callback. + + err := sub.Receive(context.Background(), func(ctx context.Context, m *Message) { + log.Printf("Got message: %s", m.Data) + m.Ack() + }) + if err != nil { + // Handle error. + } + +The callback is invoked concurrently by multiple goroutines, maximizing +throughput. To terminate a call to Receive, cancel its context. + +Once client code has processed the message, it must call Message.Ack or +Message.Nack; otherwise the message will eventually be redelivered. If the +client cannot or doesn't want to process the message, it can call Message.Nack +to speed redelivery. For more information and configuration options, see +"Deadlines" below. + +Note: It is possible for Messages to be redelivered, even if Message.Ack has +been called. Client code must be robust to multiple deliveries of messages. + +Note: This uses pubsub's streaming pull feature. This feature has properties that +may be surprising. Please take a look at https://cloud.google.com/pubsub/docs/pull#streamingpull +for more details on how streaming pull behaves compared to the synchronous +pull method. + + +Deadlines + +The default pubsub deadlines are suitable for most use cases, but may be +overridden. This section describes the tradeoffs that should be considered +when overriding the defaults. + +Behind the scenes, each message returned by the Pub/Sub server has an +associated lease, known as an "ACK deadline". Unless a message is +acknowledged within the ACK deadline, or the client requests that +the ACK deadline be extended, the message will become eligible for redelivery. + +As a convenience, the pubsub client will automatically extend deadlines until +either: + * Message.Ack or Message.Nack is called, or + * The "MaxExtension" period elapses from the time the message is fetched from the server. + +ACK deadlines are extended periodically by the client. The initial ACK +deadline given to messages is 10s. The period between extensions, as well as the +length of the extension, automatically adjust depending on the time it takes to ack +messages, up to 10m. This has the effect that subscribers that process messages +quickly have their message ack deadlines extended for a short amount, whereas +subscribers that process messages slowly have their message ack deadlines extended +for a large amount. The net effect is fewer RPCs sent from the client library. + +For example, consider a subscriber that takes 3 minutes to process each message. +Since the library has already recorded several 3 minute "time to ack"s in a +percentile distribution, future message extensions are sent with a value of 3 +minutes, every 3 minutes. Suppose the application crashes 5 seconds after the +library sends such an extension: the Pub/Sub server would wait the remaining +2m55s before re-sending the messages out to other subscribers. + +Please note that the client library does not use the subscription's AckDeadline +by default.
To enforce the subscription AckDeadline, set MaxExtension to the +subscription's AckDeadline: + + cfg, err := sub.Config(ctx) + if err != nil { + // TODO: handle err + } + + sub.ReceiveSettings.MaxExtension = cfg.AckDeadline + + +Slow Message Processing + +For use cases where message processing exceeds 30 minutes, we recommend using +the base client in a pull model, since long-lived streams are periodically killed +by firewalls. See the example at https://godoc.org/cloud.google.com/go/pubsub/apiv1#example-SubscriberClient-Pull-LengthyClientProcessing +*/ +package pubsub // import "cloud.google.com/go/pubsub" diff --git a/vendor/cloud.google.com/go/pubsub/flow_controller.go b/vendor/cloud.google.com/go/pubsub/flow_controller.go new file mode 100644 index 000000000000..3f165a0ac18c --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/flow_controller.go @@ -0,0 +1,122 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "context" + "sync/atomic" + + "golang.org/x/sync/semaphore" +) + +// flowController implements flow control for Subscription.Receive. +type flowController struct { + maxCount int + maxSize int // max total size of messages + semCount, semSize *semaphore.Weighted // enforces max number and size of messages + // Number of calls to acquire - number of calls to release. This can go + // negative if semCount == nil and a large acquire is followed by multiple + // small releases. + // Atomic. + countRemaining int64 +} + +// newFlowController creates a new flowController that ensures no more than +// maxCount messages or maxSize bytes are outstanding at once. If maxCount or +// maxSize is < 1, then an unlimited number of messages or bytes is permitted, +// respectively. +func newFlowController(maxCount, maxSize int) *flowController { + fc := &flowController{ + maxCount: maxCount, + maxSize: maxSize, + semCount: nil, + semSize: nil, + } + if maxCount > 0 { + fc.semCount = semaphore.NewWeighted(int64(maxCount)) + } + if maxSize > 0 { + fc.semSize = semaphore.NewWeighted(int64(maxSize)) + } + return fc +} + +// acquire blocks until one message of size bytes can proceed or ctx is done. +// It returns nil in the first case, or ctx.Err() in the second. +// +// acquire allows large messages to proceed by treating a size greater than maxSize +// as if it were equal to maxSize. +func (f *flowController) acquire(ctx context.Context, size int) error { + if f.semCount != nil { + if err := f.semCount.Acquire(ctx, 1); err != nil { + return err + } + } + if f.semSize != nil { + if err := f.semSize.Acquire(ctx, f.bound(size)); err != nil { + if f.semCount != nil { + f.semCount.Release(1) + } + return err + } + } + atomic.AddInt64(&f.countRemaining, 1) + return nil +} + +// tryAcquire returns false if acquire would block. Otherwise, it behaves like +// acquire and returns true. +// +// tryAcquire allows large messages to proceed by treating a size greater than +// maxSize as if it were equal to maxSize. 
+func (f *flowController) tryAcquire(size int) bool { + if f.semCount != nil { + if !f.semCount.TryAcquire(1) { + return false + } + } + if f.semSize != nil { + if !f.semSize.TryAcquire(f.bound(size)) { + if f.semCount != nil { + f.semCount.Release(1) + } + return false + } + } + atomic.AddInt64(&f.countRemaining, 1) + return true +} + +// release notes that one message of size bytes is no longer outstanding. +func (f *flowController) release(size int) { + atomic.AddInt64(&f.countRemaining, -1) + if f.semCount != nil { + f.semCount.Release(1) + } + if f.semSize != nil { + f.semSize.Release(f.bound(size)) + } +} + +func (f *flowController) bound(size int) int64 { + if size > f.maxSize { + return int64(f.maxSize) + } + return int64(size) +} + +func (f *flowController) count() int { + return int(atomic.LoadInt64(&f.countRemaining)) +} diff --git a/vendor/cloud.google.com/go/pubsub/go.mod b/vendor/cloud.google.com/go/pubsub/go.mod new file mode 100644 index 000000000000..3417dd91383a --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/go.mod @@ -0,0 +1,19 @@ +module cloud.google.com/go/pubsub + +go 1.11 + +require ( + cloud.google.com/go v0.54.0 + cloud.google.com/go/bigquery v1.5.0 // indirect + github.com/golang/protobuf v1.3.4 + github.com/google/go-cmp v0.4.0 + github.com/googleapis/gax-go/v2 v2.0.5 + go.opencensus.io v0.22.3 + golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d + golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e + golang.org/x/time v0.0.0-20191024005414-555d28b269f0 + golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d // indirect + google.golang.org/api v0.20.0 + google.golang.org/genproto v0.0.0-20200312145019-da6875a35672 + google.golang.org/grpc v1.28.0 +) diff --git a/vendor/cloud.google.com/go/pubsub/go.sum b/vendor/cloud.google.com/go/pubsub/go.sum new file mode 100644 index 000000000000..5befa3395a00 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/go.sum @@ -0,0 +1,373 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1 h1:lRi0CHyU+ytlvylOlFKKq0af6JncuyoRh1J+QJBqQx0= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3 h1:AVXDdKsrtX33oR9fbCMu/+c1o8Ofjq6Ku/MInaLVg5Y= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0 h1:0E3eE8MX426vUOs7aHfI7aN1BrIzzzf4ccKCSfSjGmc= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0 h1:GGslhk/BU052LPlnI1vpp3fcbUs+hQ3E+Doti/3/vF8= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0 h1:MZQCQQaRwOrAcuKjiHWHrgKykt4fZyuwF2dtiG3fGW8= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0 h1:3ithwDMr7/3vpAMXiH+ZQnYbuIsh+OPhUPMFC9enmn0= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0 
h1:sAbMqjY1PEQKZBWfbu6Y6bsupJ9c4QdHnzg/VvYTLcE= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0 h1:K2NyuHRuv15ku6eUpe0DQk5ZykPMnSOnvuVf6IHcjaE= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0 h1:/May9ojXjRkPBNVrq+oWLqmWCkr4OU5uRY29bu0mRyQ= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0 h1:RPUcBvDeYgQFMfQu1eBMq6piD1SXmLH+vK3qjewZPus= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0 h1:UDpwYIwla4jHGzZJaEJYx1tOejbgSoNqsAfHAUYe2r8= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod 
h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1 h1:Xye71clBPdm5HgqGwUkwhbynsUJZhDbS20FvLhQ2izg= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= 
+github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0 h1:C9hSCOW830chIVkdja34wa6Ky+IzWllkUinR+BtRZd4= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5 h1:58fnuSXlxZmFdJyvtTFVmVhcMLU6v5fEb/ok4wyqtNU= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522 h1:OeRHuibLsmZkFj773W4LcfAGsSxJgfPONhr8cmO+eLA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod 
h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979 h1:Agxu5KLo8o7Bb634SVDnhIfpTvxmzUwhbYAzBvXt6h4= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587 h1:5Uz0rkjCFu9BC9gCRN7EkwVvhNyQgGWb8KNJrPwBoHY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a h1:7Wlg8L54In96HTWOaI4sreLJ6qfyGuvSau5el3fK41Y= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd h1:zkO/Lhoka23X63N9OSzpSeROEUQ5ODw47tM3YWjygbs= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN5RBoH5xkJk3CqlMI/Y= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422 h1:QzoH/1pFpZguR8NrRHLcO6jKqfv2zpuSqZLgdm7ZmjI= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac h1:8R1esu+8QioDxo4E4mX6bFztO+dMTM49DNAaWfO5OeY= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod 
h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee h1:WG0RUwxtNT4qqaXX3DPA8zHFNm/D9xaBpxzHt1WcA/E= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553 h1:efeOvDhwQ29Dj3SdAV/MJf8oukgn+8D8WgaCaRMchF8= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa h1:F+8P+gmewFQYRk6JoLQLwjBCTu3mcIURZfNkVweuRKA= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2 h1:CCH4IOTTfewWjGOlSp+zGcjutRKlBEZQ6wTn8ozI/nI= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0 h1:MsuvTghUPjX762sGLnGsxC3HM0B5r83wEtYcYR8/vRs= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b h1:0mm1VjtFUOIlE1SbDlwjYaDxZVDP2S5ou6y0gSgXHu8= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a h1:GuSPYbZzB5/dcLNCwLQLsg3obCJtX9IJhpXkvY7kzk0= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod 
h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6 h1:pE8b58s1HRDMi8RDc79m0HISf9D4TzseP40cEA6IGfs= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e h1:vcxGaoTs7kV8m5Np9uUNQin4BrLOthgV7252N8V+FwY= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0 h1:HyfiK1WMnHj5FXFXatD+Qs1A/xC2Run6RzeW1SyHxpc= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8 h1:JA8d3MPx/IToSyXZG/RhwYEtfrKO1Fxrqe8KrkiLXKM= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1 h1:gZpLHxUX5BdYLA08Lj4YCJNN/jk7KtquiArPoeX0WvA= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4 h1:sfkvUWPNGwSV+8/fNqctR5lS2AqCSqYwXdrjCxp/dXo= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae h1:/WDfKMnPU+m5M4xB+6x4kaepxRw6jWvR5iDRdvjHgy8= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527 h1:uYVVQ9WP/Ds2ROhcaGPeIdVq0RIXVLwsHlnvJ+cT1So= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0 h1:Dh6fw+p6FyRl5x/FvNswO1ji0lIGzm3KP8Y9VkS9PTE= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff h1:On1qIo75ByTwFJ4/W2bIqHcwJ9XAqtSWUs8GwRrIhtc= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools 
v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4 h1:Toz2IK7k8rbltAXwNAxKcn9OzqyNfMUhUNjz3sL0NMk= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c h1:2EA2K0k9bcvvEDlqD8xdlOhCOqq+O/p9Voqi4x9W1YU= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a h1:7YaEqUc1tUg0yDwvdX+3U5bwrBg7u3FFAZ5D8gUs4/c= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56 h1:DFtSed2q3HtNuVazwVDZ4nSRS/JrZEig0gz2BY4VNrg= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2 h1:L/G4KZvrQn7FWLN/LlulBtBzrLUhqjiGfTWWDmrh+IQ= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d h1:7M9AXzLrJWWGdDYtBblPHBTnHtaN6KKQ98OYb35mLlY= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb h1:iKlO7ROJc6SttHKlxzwGytRtBUqX4VARrNTgP2YLX5M= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d h1:3K34ovZAOnVaUPxanr0j4ghTZTPTA0CnXvjCl+5lZqk= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0 h1:jbyannxz0XFD3zdjgrSUsaJbgpH4eTrkdhRChkHPfO8= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0 
h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0 h1:0q95w+VuFtv4PAx4PZVQdBMmYbaCHbnfKaEiDIcVyag= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0 h1:TgDr+1inK2XVUKZx3BYAqQg/GwucGdBkzZjWaTg/I+A= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0 h1:GwFK8+l5/gdsOYKz5p6M4UK+QT8OvmHWZPJCnf+5DjA= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0 h1:jz2KixHX7EcCPiQrySzPdnYT7DbINAypCqKZ1Z7GM40= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1 h1:QzqyMA1tlu6CgqCDUtU9V+ZKhLFT2dkJuANu5QaxI3I= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55 h1:gSJIx1SDwno+2ElGhA4+qG2zF97qiUzTM+rQ0klBOcE= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51 h1:Ex1mq5jaJof+kRnYi3SlYJ8KKa9Ao3NHyIT5XJ1gF6U= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb h1:ADPHZzpzM4tk4V4S5cnCrr5SwzvlrPRmqqCuJDB8UTs= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba h1:pRj9OXZbwNtbtZtOB4dLwfK4u+EVRMvP+e9zKkg2grM= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150 h1:VPpdpQkGvFicX9yo4G5oxZPi9ALBnEOZblPSa/Wa2m4= +google.golang.org/genproto 
v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce h1:1mbrb1tUU+Zmt5C94IGKADBTJZjZXAd+BubWi7r9EiI= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63 h1:YzfoEYWbODU5Fbt37+h7X16BWQbad7Q4S6gclTKFXM8= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383 h1:Vo0fD5w0fUKriWlZLyrim2GXbumyN0D6euW79T9PgEE= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171 h1:xes2Q2k+d/+YNXVw0FpZkIDJiaux4OVrRKXRAzH6A0U= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672 h1:jiDSspVssiikoRPFHT6pYrL+CL6/yIc3b9AuHO/4xik= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.1 h1:j6XxA85m/6txkUCHvzlV5f+HBNl/1r5cZ2A/3IEFOO8= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0 h1:rRYRFMVgRv6E0D70Skyfsr28tDXIuuPZyWGMPdMcnXg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a h1:LJwr7TCTghdatWv40WobzlKXc9c4s8oGa7QKJUtHhWA= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= 
+honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3 h1:sXmLre5bzIR6ypkjXCDI3jHPssRhc8KD/Ome589sc3U= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/vendor/cloud.google.com/go/pubsub/go_mod_tidy_hack.go b/vendor/cloud.google.com/go/pubsub/go_mod_tidy_hack.go new file mode 100644 index 000000000000..20b865930b9c --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/go_mod_tidy_hack.go @@ -0,0 +1,22 @@ +// Copyright 2019 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// This file, and the cloud.google.com/go import, won't actually become part of +// the resultant binary. +// +build modhack + +package pubsub + +// Necessary for safely adding multi-module repo. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository +import _ "cloud.google.com/go" diff --git a/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go b/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go new file mode 100644 index 000000000000..3c061fb1d8b4 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/internal/distribution/distribution.go @@ -0,0 +1,79 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package distribution + +import ( + "log" + "math" + "sort" + "sync" + "sync/atomic" +) + +// D is a distribution. Methods of D can be called concurrently by multiple +// goroutines. +type D struct { + buckets []uint64 + // sumsReuse is the scratch space that is reused + // to store sums during invocations of Percentile. + // After an invocation of New(n): + // len(buckets) == len(sumsReuse) == n + sumsReuse []uint64 + mu sync.Mutex +} + +// New creates a new distribution capable of holding values from 0 to n-1. +func New(n int) *D { + return &D{ + buckets: make([]uint64, n), + sumsReuse: make([]uint64, n), + } +} + +// Record records value v to the distribution. +// To help with distributions with long tails, if v is larger than the maximum value, +// Record records the maximum value instead. +// If v is negative, Record panics. 
+func (d *D) Record(v int) { + if v < 0 { + log.Panicf("Record: value out of range: %d", v) + } else if v >= len(d.buckets) { + v = len(d.buckets) - 1 + } + atomic.AddUint64(&d.buckets[v], 1) +} + +// Percentile computes the p-th percentile of the distribution where +// p is between 0 and 1. This method may be called by multiple goroutines. +func (d *D) Percentile(p float64) int { + // NOTE: This implementation uses the nearest-rank method. + // https://en.wikipedia.org/wiki/Percentile#The_nearest-rank_method + + if p < 0 || p > 1 { + log.Panicf("Percentile: percentile out of range: %f", p) + } + + d.mu.Lock() + defer d.mu.Unlock() + + var sum uint64 + for i := range d.sumsReuse { + sum += atomic.LoadUint64(&d.buckets[i]) + d.sumsReuse[i] = sum + } + + target := uint64(math.Ceil(float64(sum) * p)) + return sort.Search(len(d.sumsReuse), func(i int) bool { return d.sumsReuse[i] >= target }) +} diff --git a/vendor/cloud.google.com/go/pubsub/iterator.go b/vendor/cloud.google.com/go/pubsub/iterator.go new file mode 100644 index 000000000000..c42fb02d71b1 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/iterator.go @@ -0,0 +1,533 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "context" + "io" + "sync" + "time" + + vkit "cloud.google.com/go/pubsub/apiv1" + "cloud.google.com/go/pubsub/internal/distribution" + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Between message receipt and ack (that is, the time spent processing a message) we want to extend the message +// deadline by way of modack. However, we don't want to extend the deadline right as soon as the deadline expires; +// instead, we'd want to extend the deadline a little bit of time ahead. gracePeriod is that amount of time ahead +// of the actual deadline. +const gracePeriod = 5 * time.Second + +type messageIterator struct { + ctx context.Context + cancel func() // the function that will cancel ctx; called in stop + po *pullOptions + ps *pullStream + subc *vkit.SubscriberClient + subName string + maxExtensionPeriod *time.Duration + kaTick <-chan time.Time // keep-alive (deadline extensions) + ackTicker *time.Ticker // message acks + nackTicker *time.Ticker // message nacks (more frequent than acks) + pingTicker *time.Ticker // sends to the stream to keep it open + failed chan struct{} // closed on stream error + drained chan struct{} // closed when stopped && no more pending messages + wg sync.WaitGroup + + mu sync.Mutex + ackTimeDist *distribution.D // dist uses seconds + + // keepAliveDeadlines is a map of id to expiration time. This map is used in conjunction with + // subscription.ReceiveSettings.MaxExtension to record the maximum amount of time (the + // deadline, more specifically) we're willing to extend a message's ack deadline. 
As each + // message arrives, we'll record now+MaxExtension in this table; whenever we have a chance + // to update ack deadlines (via modack), we'll consult this table and only include IDs + // that are not beyond their deadline. + keepAliveDeadlines map[string]time.Time + pendingAcks map[string]bool + pendingNacks map[string]bool + pendingModAcks map[string]bool // ack IDs whose ack deadline is to be modified + err error // error from stream failure +} + +// newMessageIterator starts and returns a new messageIterator. +// subName is the full name of the subscription to pull messages from. +// Stop must be called on the messageIterator when it is no longer needed. +// The iterator always uses the background context for acking messages and extending message deadlines. +func newMessageIterator(subc *vkit.SubscriberClient, subName string, maxExtensionPeriod *time.Duration, po *pullOptions) *messageIterator { + var ps *pullStream + if !po.synchronous { + ps = newPullStream(context.Background(), subc.StreamingPull, subName) + } + // The period will update each tick based on the distribution of acks. We'll start by arbitrarily sending + // the first keepAlive halfway towards the minimum ack deadline. + keepAlivePeriod := minAckDeadline / 2 + + // Ack promptly so users don't lose work if client crashes. + ackTicker := time.NewTicker(100 * time.Millisecond) + nackTicker := time.NewTicker(100 * time.Millisecond) + pingTicker := time.NewTicker(30 * time.Second) + cctx, cancel := context.WithCancel(context.Background()) + it := &messageIterator{ + ctx: cctx, + cancel: cancel, + ps: ps, + po: po, + subc: subc, + subName: subName, + maxExtensionPeriod: maxExtensionPeriod, + kaTick: time.After(keepAlivePeriod), + ackTicker: ackTicker, + nackTicker: nackTicker, + pingTicker: pingTicker, + failed: make(chan struct{}), + drained: make(chan struct{}), + ackTimeDist: distribution.New(int(maxAckDeadline/time.Second) + 1), + keepAliveDeadlines: map[string]time.Time{}, + pendingAcks: map[string]bool{}, + pendingNacks: map[string]bool{}, + pendingModAcks: map[string]bool{}, + } + it.wg.Add(1) + go it.sender() + return it +} + +// Subscription.receive will call stop on its messageIterator when finished with it. +// Stop will block until Done has been called on all Messages that have been +// returned by Next, or until the context with which the messageIterator was created +// is cancelled or exceeds its deadline. +func (it *messageIterator) stop() { + it.cancel() + it.mu.Lock() + it.checkDrained() + it.mu.Unlock() + it.wg.Wait() +} + +// checkDrained closes the drained channel if the iterator has been stopped and all +// pending messages have either been n/acked or expired. +// +// Called with the lock held. +func (it *messageIterator) checkDrained() { + select { + case <-it.drained: + return + default: + } + select { + case <-it.ctx.Done(): + if len(it.keepAliveDeadlines) == 0 { + close(it.drained) + } + default: + } +} + +// Called when a message is acked/nacked. +func (it *messageIterator) done(ackID string, ack bool, receiveTime time.Time) { + it.ackTimeDist.Record(int(time.Since(receiveTime) / time.Second)) + it.mu.Lock() + defer it.mu.Unlock() + delete(it.keepAliveDeadlines, ackID) + if ack { + it.pendingAcks[ackID] = true + } else { + it.pendingNacks[ackID] = true + } + it.checkDrained() +} + +// fail is called when a stream method returns a permanent error. +// fail returns it.err. This may be err, or it may be the error +// set by an earlier call to fail. 
+func (it *messageIterator) fail(err error) error { + it.mu.Lock() + defer it.mu.Unlock() + if it.err == nil { + it.err = err + close(it.failed) + } + return it.err +} + +// receive makes a call to the stream's Recv method, or the Pull RPC, and returns +// its messages. +// maxToPull is the maximum number of messages for the Pull RPC. +func (it *messageIterator) receive(maxToPull int32) ([]*Message, error) { + it.mu.Lock() + ierr := it.err + it.mu.Unlock() + if ierr != nil { + return nil, ierr + } + + // Stop retrieving messages if the iterator's Stop method was called. + select { + case <-it.ctx.Done(): + it.wg.Wait() + return nil, io.EOF + default: + } + + var rmsgs []*pb.ReceivedMessage + var err error + if it.po.synchronous { + rmsgs, err = it.pullMessages(maxToPull) + } else { + rmsgs, err = it.recvMessages() + } + // Any error here is fatal. + if err != nil { + return nil, it.fail(err) + } + recordStat(it.ctx, PullCount, int64(len(rmsgs))) + msgs, err := convertMessages(rmsgs) + if err != nil { + return nil, it.fail(err) + } + // We received some messages. Remember them so we can keep them alive. Also, + // do a receipt mod-ack when streaming. + maxExt := time.Now().Add(it.po.maxExtension) + ackIDs := map[string]bool{} + it.mu.Lock() + now := time.Now() + for _, m := range msgs { + m.receiveTime = now + addRecv(m.ID, m.ackID, now) + m.doneFunc = it.done + it.keepAliveDeadlines[m.ackID] = maxExt + // Don't change the mod-ack if the message is going to be nacked. This is + // possible if there are retries. + if !it.pendingNacks[m.ackID] { + ackIDs[m.ackID] = true + } + } + deadline := it.ackDeadline() + it.mu.Unlock() + if len(ackIDs) > 0 { + if !it.sendModAck(ackIDs, deadline) { + return nil, it.err + } + } + return msgs, nil +} + +// Get messages using the Pull RPC. +// This may block indefinitely. It may also return zero messages, after some time waiting. +func (it *messageIterator) pullMessages(maxToPull int32) ([]*pb.ReceivedMessage, error) { + // Use it.ctx as the RPC context, so that if the iterator is stopped, the call + // will return immediately. + res, err := it.subc.Pull(it.ctx, &pb.PullRequest{ + Subscription: it.subName, + MaxMessages: maxToPull, + }, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) + switch { + case err == context.Canceled: + return nil, nil + case err != nil: + return nil, err + default: + return res.ReceivedMessages, nil + } +} + +func (it *messageIterator) recvMessages() ([]*pb.ReceivedMessage, error) { + res, err := it.ps.Recv() + if err != nil { + return nil, err + } + return res.ReceivedMessages, nil +} + +// sender runs in a goroutine and handles all sends to the stream. +func (it *messageIterator) sender() { + defer it.wg.Done() + defer it.ackTicker.Stop() + defer it.nackTicker.Stop() + defer it.pingTicker.Stop() + defer func() { + if it.ps != nil { + it.ps.CloseSend() + } + }() + + done := false + for !done { + sendAcks := false + sendNacks := false + sendModAcks := false + sendPing := false + + dl := it.ackDeadline() + + select { + case <-it.failed: + // Stream failed: nothing to do, so stop immediately. + return + + case <-it.drained: + // All outstanding messages have been marked done: + // nothing left to do except make the final calls. + it.mu.Lock() + sendAcks = (len(it.pendingAcks) > 0) + sendNacks = (len(it.pendingNacks) > 0) + // No point in sending modacks. 
+ done = true + + case <-it.kaTick: + it.mu.Lock() + it.handleKeepAlives() + sendModAcks = (len(it.pendingModAcks) > 0) + + nextTick := dl - gracePeriod + if nextTick <= 0 { + // If the deadline is <= gracePeriod, let's tick again halfway to + // the deadline. + nextTick = dl / 2 + } + it.kaTick = time.After(nextTick) + + case <-it.nackTicker.C: + it.mu.Lock() + sendNacks = (len(it.pendingNacks) > 0) + + case <-it.ackTicker.C: + it.mu.Lock() + sendAcks = (len(it.pendingAcks) > 0) + + case <-it.pingTicker.C: + it.mu.Lock() + // Ping only if we are processing messages via streaming. + sendPing = !it.po.synchronous + } + // Lock is held here. + var acks, nacks, modAcks map[string]bool + if sendAcks { + acks = it.pendingAcks + it.pendingAcks = map[string]bool{} + } + if sendNacks { + nacks = it.pendingNacks + it.pendingNacks = map[string]bool{} + } + if sendModAcks { + modAcks = it.pendingModAcks + it.pendingModAcks = map[string]bool{} + } + it.mu.Unlock() + // Make Ack and ModAck RPCs. + if sendAcks { + if !it.sendAck(acks) { + return + } + } + if sendNacks { + // Nack indicated by modifying the deadline to zero. + if !it.sendModAck(nacks, 0) { + return + } + } + if sendModAcks { + if !it.sendModAck(modAcks, dl) { + return + } + } + if sendPing { + it.pingStream() + } + } +} + +// handleKeepAlives modifies the pending request to include deadline extensions +// for live messages. It also purges expired messages. +// +// Called with the lock held. +func (it *messageIterator) handleKeepAlives() { + now := time.Now() + for id, expiry := range it.keepAliveDeadlines { + if expiry.Before(now) { + // This delete will not result in skipping any map items, as implied by + // the spec at https://golang.org/ref/spec#For_statements, "For + // statements with range clause", note 3, and stated explicitly at + // https://groups.google.com/forum/#!msg/golang-nuts/UciASUb03Js/pzSq5iVFAQAJ. + delete(it.keepAliveDeadlines, id) + } else { + // This will not conflict with a nack, because nacking removes the ID from keepAliveDeadlines. + it.pendingModAcks[id] = true + } + } + it.checkDrained() +} + +func (it *messageIterator) sendAck(m map[string]bool) bool { + // Account for the Subscription field. + overhead := calcFieldSizeString(it.subName) + return it.sendAckIDRPC(m, maxPayload-overhead, func(ids []string) error { + recordStat(it.ctx, AckCount, int64(len(ids))) + addAcks(ids) + // Use context.Background() as the call's context, not it.ctx. We don't + // want to cancel this RPC when the iterator is stopped. + return it.subc.Acknowledge(context.Background(), &pb.AcknowledgeRequest{ + Subscription: it.subName, + AckIds: ids, + }) + }) +} + +// The receipt mod-ack amount is derived from a percentile distribution based +// on the time it takes to process messages. The percentile chosen is the 99%th +// percentile in order to capture the highest amount of time necessary without +// considering 1% outliers. +func (it *messageIterator) sendModAck(m map[string]bool, deadline time.Duration) bool { + deadlineSec := int32(deadline / time.Second) + // Account for the Subscription and AckDeadlineSeconds fields. 
+ overhead := calcFieldSizeString(it.subName) + calcFieldSizeInt(int(deadlineSec)) + return it.sendAckIDRPC(m, maxPayload-overhead, func(ids []string) error { + if deadline == 0 { + recordStat(it.ctx, NackCount, int64(len(ids))) + } else { + recordStat(it.ctx, ModAckCount, int64(len(ids))) + } + addModAcks(ids, deadlineSec) + // Retry this RPC on Unavailable for a short amount of time, then give up + // without returning a fatal error. The utility of this RPC is by nature + // transient (since the deadline is relative to the current time) and it + // isn't crucial for correctness (since expired messages will just be + // resent). + cctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + bo := gax.Backoff{ + Initial: 100 * time.Millisecond, + Max: time.Second, + Multiplier: 2, + } + for { + err := it.subc.ModifyAckDeadline(cctx, &pb.ModifyAckDeadlineRequest{ + Subscription: it.subName, + AckDeadlineSeconds: deadlineSec, + AckIds: ids, + }) + switch status.Code(err) { + case codes.Unavailable: + if err := gax.Sleep(cctx, bo.Pause()); err == nil { + continue + } + // Treat sleep timeout like RPC timeout. + fallthrough + case codes.DeadlineExceeded: + // Timeout. Not a fatal error, but note that it happened. + recordStat(it.ctx, ModAckTimeoutCount, 1) + return nil + default: + // Any other error is fatal. + return err + } + } + }) +} + +func (it *messageIterator) sendAckIDRPC(ackIDSet map[string]bool, maxSize int, call func([]string) error) bool { + ackIDs := make([]string, 0, len(ackIDSet)) + for k := range ackIDSet { + ackIDs = append(ackIDs, k) + } + var toSend []string + for len(ackIDs) > 0 { + toSend, ackIDs = splitRequestIDs(ackIDs, maxSize) + if err := call(toSend); err != nil { + // The underlying client handles retries, so any error is fatal to the + // iterator. + it.fail(err) + return false + } + } + return true +} + +// Send a message to the stream to keep it open. The stream will close if there's no +// traffic on it for a while. By keeping it open, we delay the start of the +// expiration timer on messages that are buffered by gRPC or elsewhere in the +// network. This matters if it takes a long time to process messages relative to the +// default ack deadline, and if the messages are small enough so that many can fit +// into the buffer. +func (it *messageIterator) pingStream() { + // Ignore error; if the stream is broken, this doesn't matter anyway. + _ = it.ps.Send(&pb.StreamingPullRequest{}) +} + +// calcFieldSizeString returns the number of bytes string fields +// will take up in an encoded proto message. +func calcFieldSizeString(fields ...string) int { + overhead := 0 + for _, field := range fields { + overhead += 1 + len(field) + proto.SizeVarint(uint64(len(field))) + } + return overhead +} + +// calcFieldSizeInt returns the number of bytes int fields +// will take up in an encoded proto message. +func calcFieldSizeInt(fields ...int) int { + overhead := 0 + for _, field := range fields { + overhead += 1 + proto.SizeVarint(uint64(field)) + } + return overhead +} + +// splitRequestIDs takes a slice of ackIDs and returns two slices such that the first +// ackID slice can be used in a request where the payload does not exceed maxSize. +func splitRequestIDs(ids []string, maxSize int) (prefix, remainder []string) { + size := 0 + i := 0 + // TODO(hongalex): Use binary search to find split index, since ackIDs are + // fairly constant. 
+ for size < maxSize && i < len(ids) { + size += calcFieldSizeString(ids[i]) + i++ + } + if size > maxSize { + i-- + } + return ids[:i], ids[i:] +} + +// The deadline to ack is derived from a percentile distribution based +// on the time it takes to process messages. The percentile chosen is the 99%th +// percentile - that is, processing times up to the 99%th longest processing +// times should be safe. The highest 1% may expire. This number was chosen +// as a way to cover most users' usecases without losing the value of +// expiration. +func (it *messageIterator) ackDeadline() time.Duration { + pt := time.Duration(it.ackTimeDist.Percentile(.99)) * time.Second + + if *it.maxExtensionPeriod > 0 && pt > *it.maxExtensionPeriod { + return *it.maxExtensionPeriod + } + if pt > maxAckDeadline { + return maxAckDeadline + } + if pt < minAckDeadline { + return minAckDeadline + } + return pt +} diff --git a/vendor/cloud.google.com/go/pubsub/message.go b/vendor/cloud.google.com/go/pubsub/message.go new file mode 100644 index 000000000000..051c1df18883 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/message.go @@ -0,0 +1,120 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "time" + + "github.com/golang/protobuf/ptypes" + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +// Message represents a Pub/Sub message. +type Message struct { + // ID identifies this message. + // This ID is assigned by the server and is populated for Messages obtained from a subscription. + // This field is read-only. + ID string + + // Data is the actual data in the message. + Data []byte + + // Attributes represents the key-value pairs the current message + // is labelled with. + Attributes map[string]string + + // ackID is the identifier to acknowledge this message. + ackID string + + // The time at which the message was published. + // This is populated by the server for Messages obtained from a subscription. + // This field is read-only. + PublishTime time.Time + + // receiveTime is the time the message was received by the client. + receiveTime time.Time + + // DeliveryAttempt is the number of times a message has been delivered. + // This is part of the dead lettering feature that forwards messages that + // fail to be processed (from nack/ack deadline timeout) to a dead letter topic. + // If dead lettering is enabled, this will be set on all attempts, starting + // with value 1. Otherwise, the value will be nil. + // This field is read-only. + // + // It is EXPERIMENTAL and a part of a closed alpha that may not be + // accessible to all users. This field is subject to change or removal + // without notice. + DeliveryAttempt *int + + // size is the approximate size of the message's data and attributes. + size int + + calledDone bool + + // The done method of the iterator that created this Message. 
+ doneFunc func(string, bool, time.Time) +} + +func toMessage(resp *pb.ReceivedMessage) (*Message, error) { + if resp.Message == nil { + return &Message{ackID: resp.AckId}, nil + } + + pubTime, err := ptypes.Timestamp(resp.Message.PublishTime) + if err != nil { + return nil, err + } + + var deliveryAttempt *int + if resp.DeliveryAttempt > 0 { + da := int(resp.DeliveryAttempt) + deliveryAttempt = &da + } + + return &Message{ + ackID: resp.AckId, + Data: resp.Message.Data, + Attributes: resp.Message.Attributes, + ID: resp.Message.MessageId, + PublishTime: pubTime, + DeliveryAttempt: deliveryAttempt, + }, nil +} + +// Ack indicates successful processing of a Message passed to the Subscriber.Receive callback. +// It should not be called on any other Message value. +// If message acknowledgement fails, the Message will be redelivered. +// Client code must call Ack or Nack when finished for each received Message. +// Calls to Ack or Nack have no effect after the first call. +func (m *Message) Ack() { + m.done(true) +} + +// Nack indicates that the client will not or cannot process a Message passed to the Subscriber.Receive callback. +// It should not be called on any other Message value. +// Nack will result in the Message being redelivered more quickly than if it were allowed to expire. +// Client code must call Ack or Nack when finished for each received Message. +// Calls to Ack or Nack have no effect after the first call. +func (m *Message) Nack() { + m.done(false) +} + +func (m *Message) done(ack bool) { + if m.calledDone { + return + } + m.calledDone = true + m.doneFunc(m.ackID, ack, m.receiveTime) +} diff --git a/vendor/cloud.google.com/go/pubsub/nodebug.go b/vendor/cloud.google.com/go/pubsub/nodebug.go new file mode 100644 index 000000000000..774a74a58dfd --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/nodebug.go @@ -0,0 +1,25 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !psdebug + +package pubsub + +import "time" + +func addRecv(string, string, time.Time) {} + +func addAcks([]string) {} + +func addModAcks([]string, int32) {} diff --git a/vendor/cloud.google.com/go/pubsub/pstest/fake.go b/vendor/cloud.google.com/go/pubsub/pstest/fake.go new file mode 100644 index 000000000000..be53bcf67eea --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/pstest/fake.go @@ -0,0 +1,1010 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package pstest provides a fake Cloud PubSub service for testing. It implements a +// simplified form of the service, suitable for unit tests. It may behave +// differently from the actual service in ways in which the service is +// non-deterministic or unspecified: timing, delivery order, etc. +// +// This package is EXPERIMENTAL and is subject to change without notice. +// +// See the example for usage. +package pstest + +import ( + "context" + "fmt" + "io" + "path" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "cloud.google.com/go/internal/testutil" + "github.com/golang/protobuf/ptypes" + durpb "github.com/golang/protobuf/ptypes/duration" + emptypb "github.com/golang/protobuf/ptypes/empty" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// For testing. Note that even though changes to the now variable are atomic, a call +// to the stored function can race with a change to that function. This could be a +// problem if tests are run in parallel, or even if concurrent parts of the same test +// change the value of the variable. +var now atomic.Value + +func init() { + now.Store(time.Now) + ResetMinAckDeadline() +} + +func timeNow() time.Time { + return now.Load().(func() time.Time)() +} + +// Server is a fake Pub/Sub server. +type Server struct { + srv *testutil.Server + Addr string // The address that the server is listening on. + GServer GServer // Not intended to be used directly. +} + +// GServer is the underlying service implementor. It is not intended to be used +// directly. +type GServer struct { + pb.PublisherServer + pb.SubscriberServer + + mu sync.Mutex + topics map[string]*topic + subs map[string]*subscription + msgs []*Message // all messages ever published + msgsByID map[string]*Message + wg sync.WaitGroup + nextID int + streamTimeout time.Duration +} + +// NewServer creates a new fake server running in the current process. +func NewServer() *Server { + srv, err := testutil.NewServer() + if err != nil { + panic(fmt.Sprintf("pstest.NewServer: %v", err)) + } + s := &Server{ + srv: srv, + Addr: srv.Addr, + GServer: GServer{ + topics: map[string]*topic{}, + subs: map[string]*subscription{}, + msgsByID: map[string]*Message{}, + }, + } + pb.RegisterPublisherServer(srv.Gsrv, &s.GServer) + pb.RegisterSubscriberServer(srv.Gsrv, &s.GServer) + srv.Start() + return s +} + +// Publish behaves as if the Publish RPC was called with a message with the given +// data and attrs. It returns the ID of the message. +// The topic will be created if it doesn't exist. +// +// Publish panics if there is an error, which is appropriate for testing. +func (s *Server) Publish(topic string, data []byte, attrs map[string]string) string { + const topicPattern = "projects/*/topics/*" + ok, err := path.Match(topicPattern, topic) + if err != nil { + panic(err) + } + if !ok { + panic(fmt.Sprintf("topic name must be of the form %q", topicPattern)) + } + _, _ = s.GServer.CreateTopic(context.TODO(), &pb.Topic{Name: topic}) + req := &pb.PublishRequest{ + Topic: topic, + Messages: []*pb.PubsubMessage{{Data: data, Attributes: attrs}}, + } + res, err := s.GServer.Publish(context.TODO(), req) + if err != nil { + panic(fmt.Sprintf("pstest.Server.Publish: %v", err)) + } + return res.MessageIds[0] +} + +// SetStreamTimeout sets the amount of time a stream will be active before it shuts +// itself down. This mimics the real service's behavior of closing streams after 30 +// minutes. 
If SetStreamTimeout is never called or is passed zero, streams never shut +// down. +func (s *Server) SetStreamTimeout(d time.Duration) { + s.GServer.mu.Lock() + defer s.GServer.mu.Unlock() + s.GServer.streamTimeout = d +} + +// A Message is a message that was published to the server. +type Message struct { + ID string + Data []byte + Attributes map[string]string + PublishTime time.Time + Deliveries int // number of times delivery of the message was attempted + Acks int // number of acks received from clients + + // protected by server mutex + deliveries int + acks int + Modacks []Modack // modacks received by server for this message + +} + +// Modack represents a modack sent to the server. +type Modack struct { + AckID string + AckDeadline int32 + ReceivedAt time.Time +} + +// Messages returns information about all messages ever published. +func (s *Server) Messages() []*Message { + s.GServer.mu.Lock() + defer s.GServer.mu.Unlock() + + var msgs []*Message + for _, m := range s.GServer.msgs { + m.Deliveries = m.deliveries + m.Acks = m.acks + msgs = append(msgs, m) + } + return msgs +} + +// Message returns the message with the given ID, or nil if no message +// with that ID was published. +func (s *Server) Message(id string) *Message { + s.GServer.mu.Lock() + defer s.GServer.mu.Unlock() + + m := s.GServer.msgsByID[id] + if m != nil { + m.Deliveries = m.deliveries + m.Acks = m.acks + } + return m +} + +// Wait blocks until all server activity has completed. +func (s *Server) Wait() { + s.GServer.wg.Wait() +} + +// ClearMessages removes all published messages +// from internal containers. +func (s *Server) ClearMessages() { + s.GServer.mu.Lock() + s.GServer.msgs = nil + s.GServer.msgsByID = make(map[string]*Message) + s.GServer.mu.Unlock() +} + +// Close shuts down the server and releases all resources. 
+func (s *Server) Close() error { + s.srv.Close() + s.GServer.mu.Lock() + defer s.GServer.mu.Unlock() + for _, sub := range s.GServer.subs { + sub.stop() + } + return nil +} + +func (s *GServer) CreateTopic(_ context.Context, t *pb.Topic) (*pb.Topic, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if s.topics[t.Name] != nil { + return nil, status.Errorf(codes.AlreadyExists, "topic %q", t.Name) + } + top := newTopic(t) + s.topics[t.Name] = top + return top.proto, nil +} + +func (s *GServer) GetTopic(_ context.Context, req *pb.GetTopicRequest) (*pb.Topic, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if t := s.topics[req.Topic]; t != nil { + return t.proto, nil + } + return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) +} + +func (s *GServer) UpdateTopic(_ context.Context, req *pb.UpdateTopicRequest) (*pb.Topic, error) { + s.mu.Lock() + defer s.mu.Unlock() + + t := s.topics[req.Topic.Name] + if t == nil { + return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic.Name) + } + for _, path := range req.UpdateMask.Paths { + switch path { + case "labels": + t.proto.Labels = req.Topic.Labels + case "message_storage_policy": + t.proto.MessageStoragePolicy = req.Topic.MessageStoragePolicy + default: + return nil, status.Errorf(codes.InvalidArgument, "unknown field name %q", path) + } + } + return t.proto, nil +} + +func (s *GServer) ListTopics(_ context.Context, req *pb.ListTopicsRequest) (*pb.ListTopicsResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + var names []string + for n := range s.topics { + if strings.HasPrefix(n, req.Project) { + names = append(names, n) + } + } + sort.Strings(names) + from, to, nextToken, err := testutil.PageBounds(int(req.PageSize), req.PageToken, len(names)) + if err != nil { + return nil, err + } + res := &pb.ListTopicsResponse{NextPageToken: nextToken} + for i := from; i < to; i++ { + res.Topics = append(res.Topics, s.topics[names[i]].proto) + } + return res, nil +} + +func (s *GServer) ListTopicSubscriptions(_ context.Context, req *pb.ListTopicSubscriptionsRequest) (*pb.ListTopicSubscriptionsResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + var names []string + for name, sub := range s.subs { + if sub.topic.proto.Name == req.Topic { + names = append(names, name) + } + } + sort.Strings(names) + from, to, nextToken, err := testutil.PageBounds(int(req.PageSize), req.PageToken, len(names)) + if err != nil { + return nil, err + } + return &pb.ListTopicSubscriptionsResponse{ + Subscriptions: names[from:to], + NextPageToken: nextToken, + }, nil +} + +func (s *GServer) DeleteTopic(_ context.Context, req *pb.DeleteTopicRequest) (*emptypb.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + + t := s.topics[req.Topic] + if t == nil { + return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) + } + t.stop() + delete(s.topics, req.Topic) + return &emptypb.Empty{}, nil +} + +func (s *GServer) CreateSubscription(_ context.Context, ps *pb.Subscription) (*pb.Subscription, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if ps.Name == "" { + return nil, status.Errorf(codes.InvalidArgument, "missing name") + } + if s.subs[ps.Name] != nil { + return nil, status.Errorf(codes.AlreadyExists, "subscription %q", ps.Name) + } + if ps.Topic == "" { + return nil, status.Errorf(codes.InvalidArgument, "missing topic") + } + top := s.topics[ps.Topic] + if top == nil { + return nil, status.Errorf(codes.NotFound, "topic %q", ps.Topic) + } + if err := checkAckDeadline(ps.AckDeadlineSeconds); err != nil { + return nil, err + } + if 
ps.MessageRetentionDuration == nil { + ps.MessageRetentionDuration = defaultMessageRetentionDuration + } + if err := checkMRD(ps.MessageRetentionDuration); err != nil { + return nil, err + } + if ps.PushConfig == nil { + ps.PushConfig = &pb.PushConfig{} + } + + sub := newSubscription(top, &s.mu, ps) + top.subs[ps.Name] = sub + s.subs[ps.Name] = sub + sub.start(&s.wg) + return ps, nil +} + +// Can be set for testing. +var minAckDeadlineSecs int32 + +// SetMinAckDeadline changes the minack deadline to n. Must be +// greater than or equal to 1 second. Remember to reset this value +// to the default after your test changes it. Example usage: +// pstest.SetMinAckDeadlineSecs(1) +// defer pstest.ResetMinAckDeadlineSecs() +func SetMinAckDeadline(n time.Duration) { + if n < time.Second { + panic("SetMinAckDeadline expects a value greater than 1 second") + } + + minAckDeadlineSecs = int32(n / time.Second) +} + +// ResetMinAckDeadline resets the minack deadline to the default. +func ResetMinAckDeadline() { + minAckDeadlineSecs = 10 +} + +func checkAckDeadline(ads int32) error { + if ads < minAckDeadlineSecs || ads > 600 { + // PubSub service returns Unknown. + return status.Errorf(codes.Unknown, "bad ack_deadline_seconds: %d", ads) + } + return nil +} + +const ( + minMessageRetentionDuration = 10 * time.Minute + maxMessageRetentionDuration = 168 * time.Hour +) + +var defaultMessageRetentionDuration = ptypes.DurationProto(maxMessageRetentionDuration) + +func checkMRD(pmrd *durpb.Duration) error { + mrd, err := ptypes.Duration(pmrd) + if err != nil || mrd < minMessageRetentionDuration || mrd > maxMessageRetentionDuration { + return status.Errorf(codes.InvalidArgument, "bad message_retention_duration %+v", pmrd) + } + return nil +} + +func (s *GServer) GetSubscription(_ context.Context, req *pb.GetSubscriptionRequest) (*pb.Subscription, error) { + s.mu.Lock() + defer s.mu.Unlock() + sub, err := s.findSubscription(req.Subscription) + if err != nil { + return nil, err + } + return sub.proto, nil +} + +func (s *GServer) UpdateSubscription(_ context.Context, req *pb.UpdateSubscriptionRequest) (*pb.Subscription, error) { + if req.Subscription == nil { + return nil, status.Errorf(codes.InvalidArgument, "missing subscription") + } + s.mu.Lock() + defer s.mu.Unlock() + sub, err := s.findSubscription(req.Subscription.Name) + if err != nil { + return nil, err + } + for _, path := range req.UpdateMask.Paths { + switch path { + case "push_config": + sub.proto.PushConfig = req.Subscription.PushConfig + + case "ack_deadline_seconds": + a := req.Subscription.AckDeadlineSeconds + if err := checkAckDeadline(a); err != nil { + return nil, err + } + sub.proto.AckDeadlineSeconds = a + + case "retain_acked_messages": + sub.proto.RetainAckedMessages = req.Subscription.RetainAckedMessages + + case "message_retention_duration": + if err := checkMRD(req.Subscription.MessageRetentionDuration); err != nil { + return nil, err + } + sub.proto.MessageRetentionDuration = req.Subscription.MessageRetentionDuration + + case "labels": + sub.proto.Labels = req.Subscription.Labels + + case "expiration_policy": + sub.proto.ExpirationPolicy = req.Subscription.ExpirationPolicy + + default: + return nil, status.Errorf(codes.InvalidArgument, "unknown field name %q", path) + } + } + return sub.proto, nil +} + +func (s *GServer) ListSubscriptions(_ context.Context, req *pb.ListSubscriptionsRequest) (*pb.ListSubscriptionsResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + var names []string + for name := range s.subs { + if 
strings.HasPrefix(name, req.Project) { + names = append(names, name) + } + } + sort.Strings(names) + from, to, nextToken, err := testutil.PageBounds(int(req.PageSize), req.PageToken, len(names)) + if err != nil { + return nil, err + } + res := &pb.ListSubscriptionsResponse{NextPageToken: nextToken} + for i := from; i < to; i++ { + res.Subscriptions = append(res.Subscriptions, s.subs[names[i]].proto) + } + return res, nil +} + +func (s *GServer) DeleteSubscription(_ context.Context, req *pb.DeleteSubscriptionRequest) (*emptypb.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + sub, err := s.findSubscription(req.Subscription) + if err != nil { + return nil, err + } + sub.stop() + delete(s.subs, req.Subscription) + sub.topic.deleteSub(sub) + return &emptypb.Empty{}, nil +} + +func (s *GServer) Publish(_ context.Context, req *pb.PublishRequest) (*pb.PublishResponse, error) { + s.mu.Lock() + defer s.mu.Unlock() + + if req.Topic == "" { + return nil, status.Errorf(codes.InvalidArgument, "missing topic") + } + top := s.topics[req.Topic] + if top == nil { + return nil, status.Errorf(codes.NotFound, "topic %q", req.Topic) + } + var ids []string + for _, pm := range req.Messages { + id := fmt.Sprintf("m%d", s.nextID) + s.nextID++ + pm.MessageId = id + pubTime := timeNow() + tsPubTime, err := ptypes.TimestampProto(pubTime) + if err != nil { + return nil, status.Errorf(codes.Internal, err.Error()) + } + pm.PublishTime = tsPubTime + m := &Message{ + ID: id, + Data: pm.Data, + Attributes: pm.Attributes, + PublishTime: pubTime, + } + top.publish(pm, m) + ids = append(ids, id) + s.msgs = append(s.msgs, m) + s.msgsByID[id] = m + } + return &pb.PublishResponse{MessageIds: ids}, nil +} + +type topic struct { + proto *pb.Topic + subs map[string]*subscription +} + +func newTopic(pt *pb.Topic) *topic { + return &topic{ + proto: pt, + subs: map[string]*subscription{}, + } +} + +func (t *topic) stop() { + for _, sub := range t.subs { + sub.proto.Topic = "_deleted-topic_" + sub.stop() + } +} + +func (t *topic) deleteSub(sub *subscription) { + delete(t.subs, sub.proto.Name) +} + +func (t *topic) publish(pm *pb.PubsubMessage, m *Message) { + for _, s := range t.subs { + s.msgs[pm.MessageId] = &message{ + publishTime: m.PublishTime, + proto: &pb.ReceivedMessage{ + AckId: pm.MessageId, + Message: pm, + }, + deliveries: &m.deliveries, + acks: &m.acks, + streamIndex: -1, + } + } +} + +type subscription struct { + topic *topic + mu *sync.Mutex // the server mutex, here for convenience + proto *pb.Subscription + ackTimeout time.Duration + msgs map[string]*message // unacked messages by message ID + streams []*stream + done chan struct{} +} + +func newSubscription(t *topic, mu *sync.Mutex, ps *pb.Subscription) *subscription { + at := time.Duration(ps.AckDeadlineSeconds) * time.Second + if at == 0 { + at = 10 * time.Second + } + return &subscription{ + topic: t, + mu: mu, + proto: ps, + ackTimeout: at, + msgs: map[string]*message{}, + done: make(chan struct{}), + } +} + +func (s *subscription) start(wg *sync.WaitGroup) { + wg.Add(1) + go func() { + defer wg.Done() + for { + select { + case <-s.done: + return + case <-time.After(10 * time.Millisecond): + s.deliver() + } + } + }() +} + +func (s *subscription) stop() { + close(s.done) +} + +func (s *GServer) Acknowledge(_ context.Context, req *pb.AcknowledgeRequest) (*emptypb.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + + sub, err := s.findSubscription(req.Subscription) + if err != nil { + return nil, err + } + for _, id := range req.AckIds { + sub.ack(id) + } + 
return &emptypb.Empty{}, nil +} + +func (s *GServer) ModifyAckDeadline(_ context.Context, req *pb.ModifyAckDeadlineRequest) (*emptypb.Empty, error) { + s.mu.Lock() + defer s.mu.Unlock() + sub, err := s.findSubscription(req.Subscription) + if err != nil { + return nil, err + } + now := time.Now() + for _, id := range req.AckIds { + s.msgsByID[id].Modacks = append(s.msgsByID[id].Modacks, Modack{AckID: id, AckDeadline: req.AckDeadlineSeconds, ReceivedAt: now}) + } + dur := secsToDur(req.AckDeadlineSeconds) + for _, id := range req.AckIds { + sub.modifyAckDeadline(id, dur) + } + return &emptypb.Empty{}, nil +} + +func (s *GServer) Pull(ctx context.Context, req *pb.PullRequest) (*pb.PullResponse, error) { + s.mu.Lock() + sub, err := s.findSubscription(req.Subscription) + if err != nil { + s.mu.Unlock() + return nil, err + } + max := int(req.MaxMessages) + if max < 0 { + s.mu.Unlock() + return nil, status.Error(codes.InvalidArgument, "MaxMessages cannot be negative") + } + if max == 0 { // MaxMessages not specified; use a default. + max = 1000 + } + msgs := sub.pull(max) + s.mu.Unlock() + // Implement the spec from the pubsub proto: + // "If ReturnImmediately set to true, the system will respond immediately even if + // it there are no messages available to return in the `Pull` response. + // Otherwise, the system may wait (for a bounded amount of time) until at + // least one message is available, rather than returning no messages." + if len(msgs) == 0 && !req.ReturnImmediately { + // Wait for a short amount of time for a message. + // TODO: signal when a message arrives, so we don't wait the whole time. + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-time.After(500 * time.Millisecond): + s.mu.Lock() + msgs = sub.pull(max) + s.mu.Unlock() + } + } + return &pb.PullResponse{ReceivedMessages: msgs}, nil +} + +func (s *GServer) StreamingPull(sps pb.Subscriber_StreamingPullServer) error { + // Receive initial message configuring the pull. + req, err := sps.Recv() + if err != nil { + return err + } + s.mu.Lock() + sub, err := s.findSubscription(req.Subscription) + s.mu.Unlock() + if err != nil { + return err + } + // Create a new stream to handle the pull. + st := sub.newStream(sps, s.streamTimeout) + err = st.pull(&s.wg) + sub.deleteStream(st) + return err +} + +func (s *GServer) Seek(ctx context.Context, req *pb.SeekRequest) (*pb.SeekResponse, error) { + // Only handle time-based seeking for now. + // This fake doesn't deal with snapshots. + var target time.Time + switch v := req.Target.(type) { + case nil: + return nil, status.Errorf(codes.InvalidArgument, "missing Seek target type") + case *pb.SeekRequest_Time: + var err error + target, err = ptypes.Timestamp(v.Time) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "bad Time target: %v", err) + } + default: + return nil, status.Errorf(codes.Unimplemented, "unhandled Seek target type %T", v) + } + + // The entire server must be locked while doing the work below, + // because the messages don't have any other synchronization. + s.mu.Lock() + defer s.mu.Unlock() + sub, err := s.findSubscription(req.Subscription) + if err != nil { + return nil, err + } + // Drop all messages from sub that were published before the target time. + for id, m := range sub.msgs { + if m.publishTime.Before(target) { + delete(sub.msgs, id) + (*m.acks)++ + } + } + // Un-ack any already-acked messages after this time; + // redelivering them to the subscription is the closest analogue here. 
+ for _, m := range s.msgs { + if m.PublishTime.Before(target) { + continue + } + sub.msgs[m.ID] = &message{ + publishTime: m.PublishTime, + proto: &pb.ReceivedMessage{ + AckId: m.ID, + // This was not preserved! + //Message: pm, + }, + deliveries: &m.deliveries, + acks: &m.acks, + streamIndex: -1, + } + } + return &pb.SeekResponse{}, nil +} + +// Gets a subscription that must exist. +// Must be called with the lock held. +func (s *GServer) findSubscription(name string) (*subscription, error) { + if name == "" { + return nil, status.Errorf(codes.InvalidArgument, "missing subscription") + } + sub := s.subs[name] + if sub == nil { + return nil, status.Errorf(codes.NotFound, "subscription %s", name) + } + return sub, nil +} + +// Must be called with the lock held. +func (s *subscription) pull(max int) []*pb.ReceivedMessage { + now := timeNow() + s.maintainMessages(now) + var msgs []*pb.ReceivedMessage + for _, m := range s.msgs { + if m.outstanding() { + continue + } + (*m.deliveries)++ + m.ackDeadline = now.Add(s.ackTimeout) + msgs = append(msgs, m.proto) + if len(msgs) >= max { + break + } + } + return msgs +} + +func (s *subscription) deliver() { + s.mu.Lock() + defer s.mu.Unlock() + + now := timeNow() + s.maintainMessages(now) + // Try to deliver each remaining message. + curIndex := 0 + for _, m := range s.msgs { + if m.outstanding() { + continue + } + // If the message was never delivered before, start with the stream at + // curIndex. If it was delivered before, start with the stream after the one + // that owned it. + if m.streamIndex < 0 { + delIndex, ok := s.tryDeliverMessage(m, curIndex, now) + if !ok { + break + } + curIndex = delIndex + 1 + m.streamIndex = curIndex + } else { + delIndex, ok := s.tryDeliverMessage(m, m.streamIndex, now) + if !ok { + break + } + m.streamIndex = delIndex + } + } +} + +// tryDeliverMessage attempts to deliver m to the stream at index i. If it can't, it +// tries streams i+1, i+2, ..., wrapping around. Once it's tried all streams, it +// exits. +// +// It returns the index of the stream it delivered the message to, or 0, false if +// it didn't deliver the message. +// +// Must be called with the lock held. +func (s *subscription) tryDeliverMessage(m *message, start int, now time.Time) (int, bool) { + for i := 0; i < len(s.streams); i++ { + idx := (i + start) % len(s.streams) + + st := s.streams[idx] + select { + case <-st.done: + s.streams = deleteStreamAt(s.streams, idx) + i-- + + case st.msgc <- m.proto: + (*m.deliveries)++ + m.ackDeadline = now.Add(st.ackTimeout) + return idx, true + + default: + } + } + return 0, false +} + +var retentionDuration = 10 * time.Minute + +// Must be called with the lock held. +func (s *subscription) maintainMessages(now time.Time) { + for id, m := range s.msgs { + // Mark a message as re-deliverable if its ack deadline has expired. + if m.outstanding() && now.After(m.ackDeadline) { + m.makeAvailable() + } + pubTime, err := ptypes.Timestamp(m.proto.Message.PublishTime) + if err != nil { + panic(err) + } + // Remove messages that have been undelivered for a long time. 
+ if !m.outstanding() && now.Sub(pubTime) > retentionDuration { + delete(s.msgs, id) + } + } +} + +func (s *subscription) newStream(gs pb.Subscriber_StreamingPullServer, timeout time.Duration) *stream { + st := &stream{ + sub: s, + done: make(chan struct{}), + msgc: make(chan *pb.ReceivedMessage), + gstream: gs, + ackTimeout: s.ackTimeout, + timeout: timeout, + } + s.mu.Lock() + s.streams = append(s.streams, st) + s.mu.Unlock() + return st +} + +func (s *subscription) deleteStream(st *stream) { + s.mu.Lock() + defer s.mu.Unlock() + var i int + for i = 0; i < len(s.streams); i++ { + if s.streams[i] == st { + break + } + } + if i < len(s.streams) { + s.streams = deleteStreamAt(s.streams, i) + } +} +func deleteStreamAt(s []*stream, i int) []*stream { + // Preserve order for round-robin delivery. + return append(s[:i], s[i+1:]...) +} + +type message struct { + proto *pb.ReceivedMessage + publishTime time.Time + ackDeadline time.Time + deliveries *int + acks *int + streamIndex int // index of stream that currently owns msg, for round-robin delivery +} + +// A message is outstanding if it is owned by some stream. +func (m *message) outstanding() bool { + return !m.ackDeadline.IsZero() +} + +func (m *message) makeAvailable() { + m.ackDeadline = time.Time{} +} + +type stream struct { + sub *subscription + done chan struct{} // closed when the stream is finished + msgc chan *pb.ReceivedMessage + gstream pb.Subscriber_StreamingPullServer + ackTimeout time.Duration + timeout time.Duration +} + +// pull manages the StreamingPull interaction for the life of the stream. +func (st *stream) pull(wg *sync.WaitGroup) error { + errc := make(chan error, 2) + wg.Add(2) + go func() { + defer wg.Done() + errc <- st.sendLoop() + }() + go func() { + defer wg.Done() + errc <- st.recvLoop() + }() + var tchan <-chan time.Time + if st.timeout > 0 { + tchan = time.After(st.timeout) + } + // Wait until one of the goroutines returns an error, or we time out. + var err error + select { + case err = <-errc: + if err == io.EOF { + err = nil + } + case <-tchan: + } + close(st.done) // stop the other goroutine + return err +} + +func (st *stream) sendLoop() error { + for { + select { + case <-st.done: + return nil + case rm := <-st.msgc: + res := &pb.StreamingPullResponse{ReceivedMessages: []*pb.ReceivedMessage{rm}} + if err := st.gstream.Send(res); err != nil { + return err + } + } + } +} + +func (st *stream) recvLoop() error { + for { + req, err := st.gstream.Recv() + if err != nil { + return err + } + st.sub.handleStreamingPullRequest(st, req) + } +} + +func (s *subscription) handleStreamingPullRequest(st *stream, req *pb.StreamingPullRequest) { + // Lock the entire server. + s.mu.Lock() + defer s.mu.Unlock() + + for _, ackID := range req.AckIds { + s.ack(ackID) + } + for i, id := range req.ModifyDeadlineAckIds { + s.modifyAckDeadline(id, secsToDur(req.ModifyDeadlineSeconds[i])) + } + if req.StreamAckDeadlineSeconds > 0 { + st.ackTimeout = secsToDur(req.StreamAckDeadlineSeconds) + } +} + +// Must be called with the lock held. +func (s *subscription) ack(id string) { + m := s.msgs[id] + if m != nil { + (*m.acks)++ + delete(s.msgs, id) + } +} + +// Must be called with the lock held. +func (s *subscription) modifyAckDeadline(id string, d time.Duration) { + m := s.msgs[id] + if m == nil { // already acked: ignore. 
+ return + } + if d == 0 { // nack + m.makeAvailable() + } else { // extend the deadline by d + m.ackDeadline = timeNow().Add(d) + } +} + +func secsToDur(secs int32) time.Duration { + return time.Duration(secs) * time.Second +} diff --git a/vendor/cloud.google.com/go/pubsub/pubsub.go b/vendor/cloud.google.com/go/pubsub/pubsub.go new file mode 100644 index 000000000000..a14f6699784b --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/pubsub.go @@ -0,0 +1,112 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub // import "cloud.google.com/go/pubsub" + +import ( + "context" + "fmt" + "os" + "runtime" + "time" + + "cloud.google.com/go/internal/version" + vkit "cloud.google.com/go/pubsub/apiv1" + "google.golang.org/api/option" + "google.golang.org/grpc" + "google.golang.org/grpc/keepalive" +) + +const ( + // ScopePubSub grants permissions to view and manage Pub/Sub + // topics and subscriptions. + ScopePubSub = "https://www.googleapis.com/auth/pubsub" + + // ScopeCloudPlatform grants permissions to view and manage your data + // across Google Cloud Platform services. + ScopeCloudPlatform = "https://www.googleapis.com/auth/cloud-platform" + + maxAckDeadline = 10 * time.Minute +) + +// Client is a Google Pub/Sub client scoped to a single project. +// +// Clients should be reused rather than being created as needed. +// A Client may be shared by multiple goroutines. +type Client struct { + projectID string + pubc *vkit.PublisherClient + subc *vkit.SubscriberClient +} + +// NewClient creates a new PubSub client. +func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (c *Client, err error) { + var o []option.ClientOption + // Environment variables for gcloud emulator: + // https://cloud.google.com/sdk/gcloud/reference/beta/emulators/pubsub/ + if addr := os.Getenv("PUBSUB_EMULATOR_HOST"); addr != "" { + conn, err := grpc.Dial(addr, grpc.WithInsecure()) + if err != nil { + return nil, fmt.Errorf("grpc.Dial: %v", err) + } + o = []option.ClientOption{option.WithGRPCConn(conn)} + } else { + numConns := runtime.GOMAXPROCS(0) + if numConns > 4 { + numConns = 4 + } + o = []option.ClientOption{ + // Create multiple connections to increase throughput. + option.WithGRPCConnectionPool(numConns), + option.WithGRPCDialOption(grpc.WithKeepaliveParams(keepalive.ClientParameters{ + Time: 5 * time.Minute, + })), + } + o = append(o, openCensusOptions()...) + } + o = append(o, opts...) + pubc, err := vkit.NewPublisherClient(ctx, o...) + if err != nil { + return nil, fmt.Errorf("pubsub: %v", err) + } + subc, err := vkit.NewSubscriberClient(ctx, option.WithGRPCConn(pubc.Connection())) + if err != nil { + // Should never happen, since we are passing in the connection. + // If it does, we cannot close, because the user may have passed in their + // own connection originally. 
+ return nil, fmt.Errorf("pubsub: %v", err) + } + pubc.SetGoogleClientInfo("gccl", version.Repo) + return &Client{ + projectID: projectID, + pubc: pubc, + subc: subc, + }, nil +} + +// Close releases any resources held by the client, +// such as memory and goroutines. +// +// If the client is available for the lifetime of the program, then Close need not be +// called at exit. +func (c *Client) Close() error { + // Return the first error, because the first call closes the connection. + err := c.pubc.Close() + _ = c.subc.Close() + return err +} + +func (c *Client) fullyQualifiedProjectName() string { + return fmt.Sprintf("projects/%s", c.projectID) +} diff --git a/vendor/cloud.google.com/go/pubsub/pullstream.go b/vendor/cloud.google.com/go/pubsub/pullstream.go new file mode 100644 index 000000000000..dd0e6c6805be --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/pullstream.go @@ -0,0 +1,189 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "context" + "io" + "sync" + "time" + + gax "github.com/googleapis/gax-go/v2" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc" +) + +// A pullStream supports the methods of a StreamingPullClient, but re-opens +// the stream on a retryable error. +type pullStream struct { + ctx context.Context + open func() (pb.Subscriber_StreamingPullClient, error) + + mu sync.Mutex + spc *pb.Subscriber_StreamingPullClient + err error // permanent error +} + +// for testing +type streamingPullFunc func(context.Context, ...gax.CallOption) (pb.Subscriber_StreamingPullClient, error) + +func newPullStream(ctx context.Context, streamingPull streamingPullFunc, subName string) *pullStream { + ctx = withSubscriptionKey(ctx, subName) + return &pullStream{ + ctx: ctx, + open: func() (pb.Subscriber_StreamingPullClient, error) { + spc, err := streamingPull(ctx, gax.WithGRPCOptions(grpc.MaxCallRecvMsgSize(maxSendRecvBytes))) + if err == nil { + recordStat(ctx, StreamRequestCount, 1) + err = spc.Send(&pb.StreamingPullRequest{ + Subscription: subName, + // We modack messages when we receive them, so this value doesn't matter too much. + StreamAckDeadlineSeconds: 60, + }) + } + if err != nil { + return nil, err + } + return spc, nil + }, + } +} + +// get returns either a valid *StreamingPullClient (SPC), or a permanent error. +// If the argument is nil, this is the first call for an RPC, and the current +// SPC will be returned (or a new one will be opened). Otherwise, this call is a +// request to re-open the stream because of a retryable error, and the argument +// is a pointer to the SPC that returned the error. +func (s *pullStream) get(spc *pb.Subscriber_StreamingPullClient) (*pb.Subscriber_StreamingPullClient, error) { + s.mu.Lock() + defer s.mu.Unlock() + // A stored error is permanent. + if s.err != nil { + return nil, s.err + } + // If the context is done, so are we. 
+ s.err = s.ctx.Err() + if s.err != nil { + return nil, s.err + } + + // If the current and argument SPCs differ, return the current one. This subsumes two cases: + // 1. We have an SPC and the caller is getting the stream for the first time. + // 2. The caller wants to retry, but they have an older SPC; we've already retried. + if spc != s.spc { + return s.spc, nil + } + // Either this is the very first call on this stream (s.spc == nil), or we have a valid + // retry request. Either way, open a new stream. + // The lock is held here for a long time, but it doesn't matter because no callers could get + // anything done anyway. + s.spc = new(pb.Subscriber_StreamingPullClient) + *s.spc, s.err = s.openWithRetry() // Any error from openWithRetry is permanent. + return s.spc, s.err +} + +func (s *pullStream) openWithRetry() (pb.Subscriber_StreamingPullClient, error) { + r := defaultRetryer{} + for { + recordStat(s.ctx, StreamOpenCount, 1) + spc, err := s.open() + bo, shouldRetry := r.Retry(err) + if err != nil && shouldRetry { + recordStat(s.ctx, StreamRetryCount, 1) + if err := gax.Sleep(s.ctx, bo); err != nil { + return nil, err + } + continue + } + return spc, err + } +} + +func (s *pullStream) call(f func(pb.Subscriber_StreamingPullClient) error, opts ...gax.CallOption) error { + var settings gax.CallSettings + for _, opt := range opts { + opt.Resolve(&settings) + } + var r gax.Retryer = &defaultRetryer{} + if settings.Retry != nil { + r = settings.Retry() + } + + var ( + spc *pb.Subscriber_StreamingPullClient + err error + ) + for { + spc, err = s.get(spc) + if err != nil { + return err + } + start := time.Now() + err = f(*spc) + if err != nil { + bo, shouldRetry := r.Retry(err) + if shouldRetry { + recordStat(s.ctx, StreamRetryCount, 1) + if time.Since(start) < 30*time.Second { // don't sleep if we've been blocked for a while + if err := gax.Sleep(s.ctx, bo); err != nil { + return err + } + } + continue + } + s.mu.Lock() + s.err = err + s.mu.Unlock() + } + return err + } +} + +func (s *pullStream) Send(req *pb.StreamingPullRequest) error { + return s.call(func(spc pb.Subscriber_StreamingPullClient) error { + recordStat(s.ctx, AckCount, int64(len(req.AckIds))) + zeroes := 0 + for _, mds := range req.ModifyDeadlineSeconds { + if mds == 0 { + zeroes++ + } + } + recordStat(s.ctx, NackCount, int64(zeroes)) + recordStat(s.ctx, ModAckCount, int64(len(req.ModifyDeadlineSeconds)-zeroes)) + recordStat(s.ctx, StreamRequestCount, 1) + return spc.Send(req) + }) +} + +func (s *pullStream) Recv() (*pb.StreamingPullResponse, error) { + var res *pb.StreamingPullResponse + err := s.call(func(spc pb.Subscriber_StreamingPullClient) error { + var err error + recordStat(s.ctx, StreamResponseCount, 1) + res, err = spc.Recv() + return err + }, gax.WithRetry(func() gax.Retryer { return &streamingPullRetryer{defaultRetryer: &defaultRetryer{}} })) + return res, err +} + +func (s *pullStream) CloseSend() error { + err := s.call(func(spc pb.Subscriber_StreamingPullClient) error { + return spc.CloseSend() + }) + s.mu.Lock() + s.err = io.EOF // should not be retried + s.mu.Unlock() + return err +} diff --git a/vendor/cloud.google.com/go/pubsub/service.go b/vendor/cloud.google.com/go/pubsub/service.go new file mode 100644 index 000000000000..a22b9147fccc --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/service.go @@ -0,0 +1,100 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "fmt" + "math" + "strings" + "time" + + gax "github.com/googleapis/gax-go/v2" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// maxPayload is the maximum number of bytes to devote to the +// encoded AcknowledgementRequest / ModifyAckDeadline proto message. +// +// With gRPC there is no way for the client to know the server's max message size (it is +// configurable on the server). We know from experience that it +// it 512K. +const ( + maxPayload = 512 * 1024 + maxSendRecvBytes = 20 * 1024 * 1024 // 20M +) + +func convertMessages(rms []*pb.ReceivedMessage) ([]*Message, error) { + msgs := make([]*Message, 0, len(rms)) + for i, m := range rms { + msg, err := toMessage(m) + if err != nil { + return nil, fmt.Errorf("pubsub: cannot decode the retrieved message at index: %d, message: %+v", i, m) + } + msgs = append(msgs, msg) + } + return msgs, nil +} + +func trunc32(i int64) int32 { + if i > math.MaxInt32 { + i = math.MaxInt32 + } + return int32(i) +} + +type defaultRetryer struct { + bo gax.Backoff +} + +// Logic originally from +// https://github.com/GoogleCloudPlatform/google-cloud-java/blob/master/google-cloud-clients/google-cloud-pubsub/src/main/java/com/google/cloud/pubsub/v1/StatusUtil.java +func (r *defaultRetryer) Retry(err error) (pause time.Duration, shouldRetry bool) { + s, ok := status.FromError(err) + if !ok { // includes io.EOF, normal stream close, which causes us to reopen + return r.bo.Pause(), true + } + switch s.Code() { + case codes.DeadlineExceeded, codes.Internal, codes.ResourceExhausted, codes.Aborted: + return r.bo.Pause(), true + case codes.Unavailable: + c := strings.Contains(s.Message(), "Server shutdownNow invoked") + if !c { + return r.bo.Pause(), true + } + return 0, false + default: + return 0, false + } +} + +type streamingPullRetryer struct { + defaultRetryer gax.Retryer +} + +// Does not retry ResourceExhausted. See: https://github.com/GoogleCloudPlatform/google-cloud-go/issues/1166#issuecomment-443744705 +func (r *streamingPullRetryer) Retry(err error) (pause time.Duration, shouldRetry bool) { + s, ok := status.FromError(err) + if !ok { // call defaultRetryer so that its backoff can be used + return r.defaultRetryer.Retry(err) + } + switch s.Code() { + case codes.ResourceExhausted: + return 0, false + default: + return r.defaultRetryer.Retry(err) + } +} diff --git a/vendor/cloud.google.com/go/pubsub/snapshot.go b/vendor/cloud.google.com/go/pubsub/snapshot.go new file mode 100644 index 000000000000..c2a28d781990 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/snapshot.go @@ -0,0 +1,160 @@ +// Copyright 2017 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/golang/protobuf/ptypes" + pb "google.golang.org/genproto/googleapis/pubsub/v1" +) + +// Snapshot is a reference to a PubSub snapshot. +type Snapshot struct { + c *Client + + // The fully qualified identifier for the snapshot, in the format "projects//snapshots/" + name string +} + +// ID returns the unique identifier of the snapshot within its project. +func (s *Snapshot) ID() string { + slash := strings.LastIndex(s.name, "/") + if slash == -1 { + // name is not a fully-qualified name. + panic("bad snapshot name") + } + return s.name[slash+1:] +} + +// SnapshotConfig contains the details of a Snapshot. +type SnapshotConfig struct { + *Snapshot + Topic *Topic + Expiration time.Time +} + +// Snapshot creates a reference to a snapshot. +func (c *Client) Snapshot(id string) *Snapshot { + return &Snapshot{ + c: c, + name: fmt.Sprintf("projects/%s/snapshots/%s", c.projectID, id), + } +} + +// Snapshots returns an iterator which returns snapshots for this project. +func (c *Client) Snapshots(ctx context.Context) *SnapshotConfigIterator { + it := c.subc.ListSnapshots(ctx, &pb.ListSnapshotsRequest{ + Project: c.fullyQualifiedProjectName(), + }) + next := func() (*SnapshotConfig, error) { + snap, err := it.Next() + if err != nil { + return nil, err + } + return toSnapshotConfig(snap, c) + } + return &SnapshotConfigIterator{next: next} +} + +// SnapshotConfigIterator is an iterator that returns a series of snapshots. +type SnapshotConfigIterator struct { + next func() (*SnapshotConfig, error) +} + +// Next returns the next SnapshotConfig. Its second return value is iterator.Done if there are no more results. +// Once Next returns iterator.Done, all subsequent calls will return iterator.Done. +func (snaps *SnapshotConfigIterator) Next() (*SnapshotConfig, error) { + return snaps.next() +} + +// Delete deletes a snapshot. +func (s *Snapshot) Delete(ctx context.Context) error { + return s.c.subc.DeleteSnapshot(ctx, &pb.DeleteSnapshotRequest{Snapshot: s.name}) +} + +// SeekToTime seeks the subscription to a point in time. +// +// Messages retained in the subscription that were published before this +// time are marked as acknowledged, and messages retained in the +// subscription that were published after this time are marked as +// unacknowledged. Note that this operation affects only those messages +// retained in the subscription (configured by SnapshotConfig). For example, +// if `time` corresponds to a point before the message retention +// window (or to a point before the system's notion of the subscription +// creation time), only retained messages will be marked as unacknowledged, +// and already-expunged messages will not be restored. +func (s *Subscription) SeekToTime(ctx context.Context, t time.Time) error { + ts, err := ptypes.TimestampProto(t) + if err != nil { + return err + } + _, err = s.c.subc.Seek(ctx, &pb.SeekRequest{ + Subscription: s.name, + Target: &pb.SeekRequest_Time{Time: ts}, + }) + return err +} + +// CreateSnapshot creates a new snapshot from this subscription. 
+// The snapshot will be for the topic this subscription is subscribed to. +// If the name is empty string, a unique name is assigned. +// +// The created snapshot is guaranteed to retain: +// (a) The existing backlog on the subscription. More precisely, this is +// defined as the messages in the subscription's backlog that are +// unacknowledged when Snapshot returns without error. +// (b) Any messages published to the subscription's topic following +// Snapshot returning without error. +func (s *Subscription) CreateSnapshot(ctx context.Context, name string) (*SnapshotConfig, error) { + if name != "" { + name = fmt.Sprintf("projects/%s/snapshots/%s", strings.Split(s.name, "/")[1], name) + } + snap, err := s.c.subc.CreateSnapshot(ctx, &pb.CreateSnapshotRequest{ + Name: name, + Subscription: s.name, + }) + if err != nil { + return nil, err + } + return toSnapshotConfig(snap, s.c) +} + +// SeekToSnapshot seeks the subscription to a snapshot. +// +// The snapshot need not be created from this subscription, +// but it must be for the topic this subscription is subscribed to. +func (s *Subscription) SeekToSnapshot(ctx context.Context, snap *Snapshot) error { + _, err := s.c.subc.Seek(ctx, &pb.SeekRequest{ + Subscription: s.name, + Target: &pb.SeekRequest_Snapshot{Snapshot: snap.name}, + }) + return err +} + +func toSnapshotConfig(snap *pb.Snapshot, c *Client) (*SnapshotConfig, error) { + exp, err := ptypes.Timestamp(snap.ExpireTime) + if err != nil { + return nil, err + } + return &SnapshotConfig{ + Snapshot: &Snapshot{c: c, name: snap.Name}, + Topic: newTopic(c, snap.Topic), + Expiration: exp, + }, nil +} diff --git a/vendor/cloud.google.com/go/pubsub/subscription.go b/vendor/cloud.google.com/go/pubsub/subscription.go new file mode 100644 index 000000000000..045ddef6109c --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/subscription.go @@ -0,0 +1,801 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "context" + "errors" + "fmt" + "io" + "strings" + "sync" + "time" + + "cloud.google.com/go/iam" + "cloud.google.com/go/internal/optional" + "github.com/golang/protobuf/ptypes" + durpb "github.com/golang/protobuf/ptypes/duration" + gax "github.com/googleapis/gax-go/v2" + "golang.org/x/sync/errgroup" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + fmpb "google.golang.org/genproto/protobuf/field_mask" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// Subscription is a reference to a PubSub subscription. +type Subscription struct { + c *Client + + // The fully qualified identifier for the subscription, in the format "projects//subscriptions/" + name string + + // Settings for pulling messages. Configure these before calling Receive. + ReceiveSettings ReceiveSettings + + mu sync.Mutex + receiveActive bool +} + +// Subscription creates a reference to a subscription. 
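// --- Editorial sketch (not part of the vendored upstream file) ---------------
// The comment above describes Subscription values as lightweight references.
// A minimal usage sketch, written from an importing package's point of view;
// it assumes an existing *pubsub.Client named client, a placeholder
// subscription ID "my-sub", and the imports "context", "fmt",
// "cloud.google.com/go/pubsub" and "google.golang.org/api/iterator".
func exampleSubscriptionRefs(ctx context.Context, client *pubsub.Client) error {
	sub := client.Subscription("my-sub") // no RPC is made; this is only a reference
	fmt.Println(sub.ID(), sub.String())  // "my-sub", "projects/<project>/subscriptions/my-sub"

	// Enumerate the project's subscriptions with the iterator returned by Subscriptions.
	it := client.Subscriptions(ctx)
	for {
		s, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		fmt.Println(s.ID())
	}
	return nil
}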
+func (c *Client) Subscription(id string) *Subscription { + return c.SubscriptionInProject(id, c.projectID) +} + +// SubscriptionInProject creates a reference to a subscription in a given project. +func (c *Client) SubscriptionInProject(id, projectID string) *Subscription { + return &Subscription{ + c: c, + name: fmt.Sprintf("projects/%s/subscriptions/%s", projectID, id), + } +} + +// String returns the globally unique printable name of the subscription. +func (s *Subscription) String() string { + return s.name +} + +// ID returns the unique identifier of the subscription within its project. +func (s *Subscription) ID() string { + slash := strings.LastIndex(s.name, "/") + if slash == -1 { + // name is not a fully-qualified name. + panic("bad subscription name") + } + return s.name[slash+1:] +} + +// Subscriptions returns an iterator which returns all of the subscriptions for the client's project. +func (c *Client) Subscriptions(ctx context.Context) *SubscriptionIterator { + it := c.subc.ListSubscriptions(ctx, &pb.ListSubscriptionsRequest{ + Project: c.fullyQualifiedProjectName(), + }) + return &SubscriptionIterator{ + c: c, + next: func() (string, error) { + sub, err := it.Next() + if err != nil { + return "", err + } + return sub.Name, nil + }, + } +} + +// SubscriptionIterator is an iterator that returns a series of subscriptions. +type SubscriptionIterator struct { + c *Client + next func() (string, error) +} + +// Next returns the next subscription. If there are no more subscriptions, iterator.Done will be returned. +func (subs *SubscriptionIterator) Next() (*Subscription, error) { + subName, err := subs.next() + if err != nil { + return nil, err + } + return &Subscription{c: subs.c, name: subName}, nil +} + +// PushConfig contains configuration for subscriptions that operate in push mode. +type PushConfig struct { + // A URL locating the endpoint to which messages should be pushed. + Endpoint string + + // Endpoint configuration attributes. See https://cloud.google.com/pubsub/docs/reference/rest/v1/projects.subscriptions#pushconfig for more details. + Attributes map[string]string + + // AuthenticationMethod is used by push endpoints to verify the source + // of push requests. + // It can be used with push endpoints that are private by default to + // allow requests only from the Cloud Pub/Sub system, for example. + // This field is optional and should be set only by users interested in + // authenticated push. + AuthenticationMethod AuthenticationMethod +} + +func (pc *PushConfig) toProto() *pb.PushConfig { + if pc == nil { + return nil + } + pbCfg := &pb.PushConfig{ + Attributes: pc.Attributes, + PushEndpoint: pc.Endpoint, + } + if authMethod := pc.AuthenticationMethod; authMethod != nil { + switch am := authMethod.(type) { + case *OIDCToken: + pbCfg.AuthenticationMethod = am.toProto() + default: // TODO: add others here when GAIC adds more definitions. + } + } + return pbCfg +} + +// AuthenticationMethod is used by push points to verify the source of push requests. +// This interface defines fields that are part of a closed alpha that may not be accessible +// to all users. +type AuthenticationMethod interface { + isAuthMethod() bool +} + +// OIDCToken allows PushConfigs to be authenticated using +// the OpenID Connect protocol https://openid.net/connect/ +type OIDCToken struct { + // Audience to be used when generating OIDC token. The audience claim + // identifies the recipients that the JWT is intended for. The audience + // value is a single case-sensitive string. 
Having multiple values (array) + // for the audience field is not supported. More info about the OIDC JWT + // token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3 + // Note: if not specified, the Push endpoint URL will be used. + Audience string + + // The service account email to be used for generating the OpenID Connect token. + // The caller of: + // * CreateSubscription + // * UpdateSubscription + // * ModifyPushConfig + // calls must have the iam.serviceAccounts.actAs permission for the service account. + // See https://cloud.google.com/iam/docs/understanding-roles#service-accounts-roles. + ServiceAccountEmail string +} + +var _ AuthenticationMethod = (*OIDCToken)(nil) + +func (oidcToken *OIDCToken) isAuthMethod() bool { return true } + +func (oidcToken *OIDCToken) toProto() *pb.PushConfig_OidcToken_ { + if oidcToken == nil { + return nil + } + return &pb.PushConfig_OidcToken_{ + OidcToken: &pb.PushConfig_OidcToken{ + Audience: oidcToken.Audience, + ServiceAccountEmail: oidcToken.ServiceAccountEmail, + }, + } +} + +// SubscriptionConfig describes the configuration of a subscription. +type SubscriptionConfig struct { + Topic *Topic + PushConfig PushConfig + + // The default maximum time after a subscriber receives a message before + // the subscriber should acknowledge the message. Note: messages which are + // obtained via Subscription.Receive need not be acknowledged within this + // deadline, as the deadline will be automatically extended. + AckDeadline time.Duration + + // Whether to retain acknowledged messages. If true, acknowledged messages + // will not be expunged until they fall out of the RetentionDuration window. + RetainAckedMessages bool + + // How long to retain messages in backlog, from the time of publish. If + // RetainAckedMessages is true, this duration affects the retention of + // acknowledged messages, otherwise only unacknowledged messages are retained. + // Defaults to 7 days. Cannot be longer than 7 days or shorter than 10 minutes. + RetentionDuration time.Duration + + // Expiration policy specifies the conditions for a subscription's expiration. + // A subscription is considered active as long as any connected subscriber is + // successfully consuming messages from the subscription or is issuing + // operations on the subscription. If `expiration_policy` is not set, a + // *default policy* with `ttl` of 31 days will be used. The minimum allowed + // value for `expiration_policy.ttl` is 1 day. + // + // Use time.Duration(0) to indicate that the subscription should never expire. + ExpirationPolicy optional.Duration + + // The set of labels for the subscription. + Labels map[string]string + + // DeadLetterPolicy specifies the conditions for dead lettering messages in + // a subscription. If not set, dead lettering is disabled. + // + // It is EXPERIMENTAL and a part of a closed alpha that may not be + // accessible to all users. This field is subject to change or removal + // without notice. 
+ DeadLetterPolicy *DeadLetterPolicy +} + +func (cfg *SubscriptionConfig) toProto(name string) *pb.Subscription { + var pbPushConfig *pb.PushConfig + if cfg.PushConfig.Endpoint != "" || len(cfg.PushConfig.Attributes) != 0 || cfg.PushConfig.AuthenticationMethod != nil { + pbPushConfig = cfg.PushConfig.toProto() + } + var retentionDuration *durpb.Duration + if cfg.RetentionDuration != 0 { + retentionDuration = ptypes.DurationProto(cfg.RetentionDuration) + } + var pbDeadLetter *pb.DeadLetterPolicy + if cfg.DeadLetterPolicy != nil { + pbDeadLetter = cfg.DeadLetterPolicy.toProto() + } + return &pb.Subscription{ + Name: name, + Topic: cfg.Topic.name, + PushConfig: pbPushConfig, + AckDeadlineSeconds: trunc32(int64(cfg.AckDeadline.Seconds())), + RetainAckedMessages: cfg.RetainAckedMessages, + MessageRetentionDuration: retentionDuration, + Labels: cfg.Labels, + ExpirationPolicy: expirationPolicyToProto(cfg.ExpirationPolicy), + DeadLetterPolicy: pbDeadLetter, + } +} + +func protoToSubscriptionConfig(pbSub *pb.Subscription, c *Client) (SubscriptionConfig, error) { + rd := time.Hour * 24 * 7 + var err error + if pbSub.MessageRetentionDuration != nil { + rd, err = ptypes.Duration(pbSub.MessageRetentionDuration) + if err != nil { + return SubscriptionConfig{}, err + } + } + var expirationPolicy time.Duration + if ttl := pbSub.ExpirationPolicy.GetTtl(); ttl != nil { + expirationPolicy, err = ptypes.Duration(ttl) + if err != nil { + return SubscriptionConfig{}, err + } + } + dlp := protoToDLP(pbSub.DeadLetterPolicy) + subC := SubscriptionConfig{ + Topic: newTopic(c, pbSub.Topic), + AckDeadline: time.Second * time.Duration(pbSub.AckDeadlineSeconds), + RetainAckedMessages: pbSub.RetainAckedMessages, + RetentionDuration: rd, + Labels: pbSub.Labels, + ExpirationPolicy: expirationPolicy, + DeadLetterPolicy: dlp, + } + pc := protoToPushConfig(pbSub.PushConfig) + if pc != nil { + subC.PushConfig = *pc + } + return subC, nil +} + +func protoToPushConfig(pbPc *pb.PushConfig) *PushConfig { + if pbPc == nil { + return nil + } + pc := &PushConfig{ + Endpoint: pbPc.PushEndpoint, + Attributes: pbPc.Attributes, + } + if am := pbPc.AuthenticationMethod; am != nil { + if oidcToken, ok := am.(*pb.PushConfig_OidcToken_); ok && oidcToken != nil && oidcToken.OidcToken != nil { + pc.AuthenticationMethod = &OIDCToken{ + Audience: oidcToken.OidcToken.GetAudience(), + ServiceAccountEmail: oidcToken.OidcToken.GetServiceAccountEmail(), + } + } + } + return pc +} + +// DeadLetterPolicy specifies the conditions for dead lettering messages in +// a subscription. +// +// It is EXPERIMENTAL and a part of a closed alpha that may not be +// accessible to all users. +type DeadLetterPolicy struct { + DeadLetterTopic string + MaxDeliveryAttempts int +} + +func (dlp *DeadLetterPolicy) toProto() *pb.DeadLetterPolicy { + if dlp == nil || dlp.DeadLetterTopic == "" { + return nil + } + return &pb.DeadLetterPolicy{ + DeadLetterTopic: dlp.DeadLetterTopic, + MaxDeliveryAttempts: int32(dlp.MaxDeliveryAttempts), + } +} +func protoToDLP(pbDLP *pb.DeadLetterPolicy) *DeadLetterPolicy { + if pbDLP == nil { + return nil + } + return &DeadLetterPolicy{ + DeadLetterTopic: pbDLP.GetDeadLetterTopic(), + MaxDeliveryAttempts: int(pbDLP.MaxDeliveryAttempts), + } +} + +// ReceiveSettings configure the Receive method. +// A zero ReceiveSettings will result in values equivalent to DefaultReceiveSettings. 
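// --- Editorial sketch (not part of the vendored upstream file) ---------------
// ReceiveSettings (defined just below) are plain struct fields that must be set
// on the Subscription before Receive is called; zero values fall back to
// DefaultReceiveSettings. A minimal sketch from an importing package's point of
// view, assuming imports "context", "time" and "cloud.google.com/go/pubsub"
// and a placeholder subscription ID.
func exampleTuneReceiveSettings(client *pubsub.Client) *pubsub.Subscription {
	sub := client.Subscription("my-sub")
	sub.ReceiveSettings.MaxOutstandingMessages = 100    // at most 100 unacked messages in flight
	sub.ReceiveSettings.MaxOutstandingBytes = 50 << 20  // ...or 50 MiB of outstanding message data
	sub.ReceiveSettings.NumGoroutines = 2               // goroutines spawned to pull messages
	sub.ReceiveSettings.MaxExtension = 10 * time.Minute // stop auto-extending ack deadlines after 10m
	return sub
}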
+type ReceiveSettings struct { + // MaxExtension is the maximum period for which the Subscription should + // automatically extend the ack deadline for each message. + // + // The Subscription will automatically extend the ack deadline of all + // fetched Messages up to the duration specified. Automatic deadline + // extension beyond the initial receipt may be disabled by specifying a + // duration less than 0. + MaxExtension time.Duration + + // MaxExtensionPeriod is the maximum duration by which to extend the ack + // deadline at a time. The ack deadline will continue to be extended by up + // to this duration until MaxExtension is reached. Setting MaxExtensionPeriod + // bounds the maximum amount of time before a message redelivery in the + // event the subscriber fails to extend the deadline. + // + // MaxExtensionPeriod configuration can be disabled by specifying a + // duration less than (or equal to) 0. + MaxExtensionPeriod time.Duration + + // MaxOutstandingMessages is the maximum number of unprocessed messages + // (unacknowledged but not yet expired). If MaxOutstandingMessages is 0, it + // will be treated as if it were DefaultReceiveSettings.MaxOutstandingMessages. + // If the value is negative, then there will be no limit on the number of + // unprocessed messages. + MaxOutstandingMessages int + + // MaxOutstandingBytes is the maximum size of unprocessed messages + // (unacknowledged but not yet expired). If MaxOutstandingBytes is 0, it will + // be treated as if it were DefaultReceiveSettings.MaxOutstandingBytes. If + // the value is negative, then there will be no limit on the number of bytes + // for unprocessed messages. + MaxOutstandingBytes int + + // NumGoroutines is the number of goroutines Receive will spawn to pull + // messages concurrently. If NumGoroutines is less than 1, it will be treated + // as if it were DefaultReceiveSettings.NumGoroutines. + // + // NumGoroutines does not limit the number of messages that can be processed + // concurrently. Even with one goroutine, many messages might be processed at + // once, because that goroutine may continually receive messages and invoke the + // function passed to Receive on them. To limit the number of messages being + // processed concurrently, set MaxOutstandingMessages. + NumGoroutines int + + // If Synchronous is true, then no more than MaxOutstandingMessages will be in + // memory at one time. (In contrast, when Synchronous is false, more than + // MaxOutstandingMessages may have been received from the service and in memory + // before being processed.) MaxOutstandingBytes still refers to the total bytes + // processed, rather than in memory. NumGoroutines is ignored. + // The default is false. + Synchronous bool +} + +// For synchronous receive, the time to wait if we are already processing +// MaxOutstandingMessages. There is no point calling Pull and asking for zero +// messages, so we pause to allow some message-processing callbacks to finish. +// +// The wait time is large enough to avoid consuming significant CPU, but +// small enough to provide decent throughput. Users who want better +// throughput should not be using synchronous mode. +// +// Waiting might seem like polling, so it's natural to think we could do better by +// noticing when a callback is finished and immediately calling Pull. But if +// callbacks finish in quick succession, this will result in frequent Pull RPCs that +// request a single message, which wastes network bandwidth. 
Better to wait for a few +// callbacks to finish, so we make fewer RPCs fetching more messages. +// +// This value is unexported so the user doesn't have another knob to think about. Note that +// it is the same value as the one used for nackTicker, so it matches this client's +// idea of a duration that is short, but not so short that we perform excessive RPCs. +const synchronousWaitTime = 100 * time.Millisecond + +// This is a var so that tests can change it. +var minAckDeadline = 10 * time.Second + +// DefaultReceiveSettings holds the default values for ReceiveSettings. +var DefaultReceiveSettings = ReceiveSettings{ + MaxExtension: 60 * time.Minute, + MaxExtensionPeriod: 0, + MaxOutstandingMessages: 1000, + MaxOutstandingBytes: 1e9, // 1G + NumGoroutines: 1, +} + +// Delete deletes the subscription. +func (s *Subscription) Delete(ctx context.Context) error { + return s.c.subc.DeleteSubscription(ctx, &pb.DeleteSubscriptionRequest{Subscription: s.name}) +} + +// Exists reports whether the subscription exists on the server. +func (s *Subscription) Exists(ctx context.Context) (bool, error) { + _, err := s.c.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: s.name}) + if err == nil { + return true, nil + } + if status.Code(err) == codes.NotFound { + return false, nil + } + return false, err +} + +// Config fetches the current configuration for the subscription. +func (s *Subscription) Config(ctx context.Context) (SubscriptionConfig, error) { + pbSub, err := s.c.subc.GetSubscription(ctx, &pb.GetSubscriptionRequest{Subscription: s.name}) + if err != nil { + return SubscriptionConfig{}, err + } + cfg, err := protoToSubscriptionConfig(pbSub, s.c) + if err != nil { + return SubscriptionConfig{}, err + } + return cfg, nil +} + +// SubscriptionConfigToUpdate describes how to update a subscription. +type SubscriptionConfigToUpdate struct { + // If non-nil, the push config is changed. + PushConfig *PushConfig + + // If non-zero, the ack deadline is changed. + AckDeadline time.Duration + + // If set, RetainAckedMessages is changed. + RetainAckedMessages optional.Bool + + // If non-zero, RetentionDuration is changed. + RetentionDuration time.Duration + + // If non-zero, Expiration is changed. + ExpirationPolicy optional.Duration + + // If non-nil, DeadLetterPolicy is changed. To remove dead lettering from + // a subscription, use the zero value for this struct. + // + // It is EXPERIMENTAL and a part of a closed alpha that may not be + // accessible to all users. + DeadLetterPolicy *DeadLetterPolicy + + // If non-nil, the current set of labels is completely + // replaced by the new set. + // This field has beta status. It is not subject to the stability guarantee + // and may change. + Labels map[string]string +} + +// Update changes an existing subscription according to the fields set in cfg. +// It returns the new SubscriptionConfig. +// +// Update returns an error if no fields were modified. 
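// --- Editorial sketch (not part of the vendored upstream file) ---------------
// Update (defined just below) sends only the fields of SubscriptionConfigToUpdate
// that are set, and returns an error when nothing would change. A minimal sketch
// from an importing package's point of view, assuming imports "context", "time"
// and "cloud.google.com/go/pubsub" and a placeholder subscription ID.
func exampleUpdateSubscription(ctx context.Context, client *pubsub.Client) error {
	sub := client.Subscription("my-sub")
	cfg, err := sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
		AckDeadline:       30 * time.Second, // non-zero, so ack_deadline_seconds enters the update mask
		RetentionDuration: 24 * time.Hour,   // non-zero, so message_retention_duration is updated too
	})
	if err != nil {
		return err
	}
	_ = cfg // the SubscriptionConfig as stored by the server after the update
	return nil
}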
+func (s *Subscription) Update(ctx context.Context, cfg SubscriptionConfigToUpdate) (SubscriptionConfig, error) { + req := s.updateRequest(&cfg) + if err := cfg.validate(); err != nil { + return SubscriptionConfig{}, fmt.Errorf("pubsub: UpdateSubscription %v", err) + } + if len(req.UpdateMask.Paths) == 0 { + return SubscriptionConfig{}, errors.New("pubsub: UpdateSubscription call with nothing to update") + } + rpsub, err := s.c.subc.UpdateSubscription(ctx, req) + if err != nil { + return SubscriptionConfig{}, err + } + return protoToSubscriptionConfig(rpsub, s.c) +} + +func (s *Subscription) updateRequest(cfg *SubscriptionConfigToUpdate) *pb.UpdateSubscriptionRequest { + psub := &pb.Subscription{Name: s.name} + var paths []string + if cfg.PushConfig != nil { + psub.PushConfig = cfg.PushConfig.toProto() + paths = append(paths, "push_config") + } + if cfg.AckDeadline != 0 { + psub.AckDeadlineSeconds = trunc32(int64(cfg.AckDeadline.Seconds())) + paths = append(paths, "ack_deadline_seconds") + } + if cfg.RetainAckedMessages != nil { + psub.RetainAckedMessages = optional.ToBool(cfg.RetainAckedMessages) + paths = append(paths, "retain_acked_messages") + } + if cfg.RetentionDuration != 0 { + psub.MessageRetentionDuration = ptypes.DurationProto(cfg.RetentionDuration) + paths = append(paths, "message_retention_duration") + } + if cfg.ExpirationPolicy != nil { + psub.ExpirationPolicy = expirationPolicyToProto(cfg.ExpirationPolicy) + paths = append(paths, "expiration_policy") + } + if cfg.DeadLetterPolicy != nil { + psub.DeadLetterPolicy = cfg.DeadLetterPolicy.toProto() + paths = append(paths, "dead_letter_policy") + } + if cfg.Labels != nil { + psub.Labels = cfg.Labels + paths = append(paths, "labels") + } + return &pb.UpdateSubscriptionRequest{ + Subscription: psub, + UpdateMask: &fmpb.FieldMask{Paths: paths}, + } +} + +const ( + // The minimum expiration policy duration is 1 day as per: + // https://github.com/googleapis/googleapis/blob/51145ff7812d2bb44c1219d0b76dac92a8bd94b2/google/pubsub/v1/pubsub.proto#L606-L607 + minExpirationPolicy = 24 * time.Hour + + // If an expiration policy is not specified, the default of 31 days is used as per: + // https://github.com/googleapis/googleapis/blob/51145ff7812d2bb44c1219d0b76dac92a8bd94b2/google/pubsub/v1/pubsub.proto#L605-L606 + defaultExpirationPolicy = 31 * 24 * time.Hour +) + +func (cfg *SubscriptionConfigToUpdate) validate() error { + if cfg == nil || cfg.ExpirationPolicy == nil { + return nil + } + policy, min := optional.ToDuration(cfg.ExpirationPolicy), minExpirationPolicy + if policy == 0 || policy >= min { + return nil + } + return fmt.Errorf("invalid expiration policy(%q) < minimum(%q)", policy, min) +} + +func expirationPolicyToProto(expirationPolicy optional.Duration) *pb.ExpirationPolicy { + if expirationPolicy == nil { + return nil + } + + dur := optional.ToDuration(expirationPolicy) + var ttl *durpb.Duration + // As per: + // https://godoc.org/google.golang.org/genproto/googleapis/pubsub/v1#ExpirationPolicy.Ttl + // if ExpirationPolicy.Ttl is set to nil, the expirationPolicy is toggled to NEVER expire. + if dur != 0 { + ttl = ptypes.DurationProto(dur) + } + return &pb.ExpirationPolicy{ + Ttl: ttl, + } +} + +// IAM returns the subscription's IAM handle. +func (s *Subscription) IAM() *iam.Handle { + return iam.InternalNewHandle(s.c.subc.Connection(), s.name) +} + +// CreateSubscription creates a new subscription on a topic. +// +// id is the name of the subscription to create. 
It must start with a letter, +// and contain only letters ([A-Za-z]), numbers ([0-9]), dashes (-), +// underscores (_), periods (.), tildes (~), plus (+) or percent signs (%). It +// must be between 3 and 255 characters in length, and must not start with +// "goog". +// +// cfg.Topic is the topic from which the subscription should receive messages. It +// need not belong to the same project as the subscription. This field is required. +// +// cfg.AckDeadline is the maximum time after a subscriber receives a message before +// the subscriber should acknowledge the message. It must be between 10 and 600 +// seconds (inclusive), and is rounded down to the nearest second. If the +// provided ackDeadline is 0, then the default value of 10 seconds is used. +// Note: messages which are obtained via Subscription.Receive need not be +// acknowledged within this deadline, as the deadline will be automatically +// extended. +// +// cfg.PushConfig may be set to configure this subscription for push delivery. +// +// If the subscription already exists an error will be returned. +func (c *Client) CreateSubscription(ctx context.Context, id string, cfg SubscriptionConfig) (*Subscription, error) { + if cfg.Topic == nil { + return nil, errors.New("pubsub: require non-nil Topic") + } + if cfg.AckDeadline == 0 { + cfg.AckDeadline = 10 * time.Second + } + if d := cfg.AckDeadline; d < 10*time.Second || d > 600*time.Second { + return nil, fmt.Errorf("ack deadline must be between 10 and 600 seconds; got: %v", d) + } + + sub := c.Subscription(id) + _, err := c.subc.CreateSubscription(ctx, cfg.toProto(sub.name)) + if err != nil { + return nil, err + } + return sub, nil +} + +var errReceiveInProgress = errors.New("pubsub: Receive already in progress for this subscription") + +// Receive calls f with the outstanding messages from the subscription. +// It blocks until ctx is done, or the service returns a non-retryable error. +// +// The standard way to terminate a Receive is to cancel its context: +// +// cctx, cancel := context.WithCancel(ctx) +// err := sub.Receive(cctx, callback) +// // Call cancel from callback, or another goroutine. +// +// If the service returns a non-retryable error, Receive returns that error after +// all of the outstanding calls to f have returned. If ctx is done, Receive +// returns nil after all of the outstanding calls to f have returned and +// all messages have been acknowledged or have expired. +// +// Receive calls f concurrently from multiple goroutines. It is encouraged to +// process messages synchronously in f, even if that processing is relatively +// time-consuming; Receive will spawn new goroutines for incoming messages, +// limited by MaxOutstandingMessages and MaxOutstandingBytes in ReceiveSettings. +// +// The context passed to f will be canceled when ctx is Done or there is a +// fatal service error. +// +// Receive will send an ack deadline extension on message receipt, then +// automatically extend the ack deadline of all fetched Messages up to the +// period specified by s.ReceiveSettings.MaxExtension. +// +// Each Subscription may have only one invocation of Receive active at a time. 
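// --- Editorial sketch (not part of the vendored upstream file) ---------------
// This expands the cctx/cancel snippet from the Receive documentation above into
// a complete call, from an importing package's point of view. It assumes imports
// "context", "log" and "cloud.google.com/go/pubsub" and a placeholder
// subscription ID; cancel would normally be invoked from the callback or another
// goroutine to stop receiving.
func exampleReceive(ctx context.Context, client *pubsub.Client) error {
	sub := client.Subscription("my-sub")
	cctx, cancel := context.WithCancel(ctx)
	defer cancel()
	return sub.Receive(cctx, func(ctx context.Context, m *pubsub.Message) {
		log.Printf("received %s: %s", m.ID, m.Data)
		m.Ack() // use m.Nack() instead to make the message eligible for redelivery
	})
}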
+func (s *Subscription) Receive(ctx context.Context, f func(context.Context, *Message)) error { + s.mu.Lock() + if s.receiveActive { + s.mu.Unlock() + return errReceiveInProgress + } + s.receiveActive = true + s.mu.Unlock() + defer func() { s.mu.Lock(); s.receiveActive = false; s.mu.Unlock() }() + + maxCount := s.ReceiveSettings.MaxOutstandingMessages + if maxCount == 0 { + maxCount = DefaultReceiveSettings.MaxOutstandingMessages + } + maxBytes := s.ReceiveSettings.MaxOutstandingBytes + if maxBytes == 0 { + maxBytes = DefaultReceiveSettings.MaxOutstandingBytes + } + maxExt := s.ReceiveSettings.MaxExtension + if maxExt == 0 { + maxExt = DefaultReceiveSettings.MaxExtension + } else if maxExt < 0 { + // If MaxExtension is negative, disable automatic extension. + maxExt = 0 + } + var numGoroutines int + switch { + case s.ReceiveSettings.Synchronous: + numGoroutines = 1 + case s.ReceiveSettings.NumGoroutines >= 1: + numGoroutines = s.ReceiveSettings.NumGoroutines + default: + numGoroutines = DefaultReceiveSettings.NumGoroutines + } + // TODO(jba): add tests that verify that ReceiveSettings are correctly processed. + po := &pullOptions{ + maxExtension: maxExt, + maxPrefetch: trunc32(int64(maxCount)), + synchronous: s.ReceiveSettings.Synchronous, + } + fc := newFlowController(maxCount, maxBytes) + + // Wait for all goroutines started by Receive to return, so instead of an + // obscure goroutine leak we have an obvious blocked call to Receive. + group, gctx := errgroup.WithContext(ctx) + for i := 0; i < numGoroutines; i++ { + group.Go(func() error { + return s.receive(gctx, po, fc, f) + }) + } + return group.Wait() +} + +func (s *Subscription) receive(ctx context.Context, po *pullOptions, fc *flowController, f func(context.Context, *Message)) error { + // Cancel a sub-context when we return, to kick the context-aware callbacks + // and the goroutine below. + ctx2, cancel := context.WithCancel(ctx) + // The iterator does not use the context passed to Receive. If it did, canceling + // that context would immediately stop the iterator without waiting for unacked + // messages. + iter := newMessageIterator(s.c.subc, s.name, &s.ReceiveSettings.MaxExtensionPeriod, po) + + // We cannot use errgroup from Receive here. Receive might already be calling group.Wait, + // and group.Wait cannot be called concurrently with group.Go. We give each receive() its + // own WaitGroup instead. + // Since wg.Add is only called from the main goroutine, wg.Wait is guaranteed + // to be called after all Adds. + var wg sync.WaitGroup + wg.Add(1) + go func() { + <-ctx2.Done() + // Call stop when Receive's context is done. + // Stop will block until all outstanding messages have been acknowledged + // or there was a fatal service error. + iter.stop() + wg.Done() + }() + defer wg.Wait() + + defer cancel() + for { + var maxToPull int32 // maximum number of messages to pull + if po.synchronous { + if po.maxPrefetch < 0 { + // If there is no limit on the number of messages to pull, use a reasonable default. + maxToPull = 1000 + } else { + // Limit the number of messages in memory to MaxOutstandingMessages + // (here, po.maxPrefetch). For each message currently in memory, we have + // called fc.acquire but not fc.release: this is fc.count(). The next + // call to Pull should fetch no more than the difference between these + // values. + maxToPull = po.maxPrefetch - int32(fc.count()) + if maxToPull <= 0 { + // Wait for some callbacks to finish. 
+ if err := gax.Sleep(ctx, synchronousWaitTime); err != nil { + // Return nil if the context is done, not err. + return nil + } + continue + } + } + } + msgs, err := iter.receive(maxToPull) + if err == io.EOF { + return nil + } + if err != nil { + return err + } + for i, msg := range msgs { + msg := msg + // TODO(jba): call acquire closer to when the message is allocated. + if err := fc.acquire(ctx, len(msg.Data)); err != nil { + // TODO(jba): test that these "orphaned" messages are nacked immediately when ctx is done. + for _, m := range msgs[i:] { + m.Nack() + } + // Return nil if the context is done, not err. + return nil + } + old := msg.doneFunc + msgLen := len(msg.Data) + msg.doneFunc = func(ackID string, ack bool, receiveTime time.Time) { + defer fc.release(msgLen) + old(ackID, ack, receiveTime) + } + wg.Add(1) + go func() { + defer wg.Done() + f(ctx2, msg) + }() + } + } +} + +type pullOptions struct { + maxExtension time.Duration + maxPrefetch int32 + // If true, use unary Pull instead of StreamingPull, and never pull more + // than maxPrefetch messages. + synchronous bool +} diff --git a/vendor/cloud.google.com/go/pubsub/topic.go b/vendor/cloud.google.com/go/pubsub/topic.go new file mode 100644 index 000000000000..cf8fc5d3460e --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/topic.go @@ -0,0 +1,558 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "context" + "errors" + "fmt" + "log" + "runtime" + "strings" + "sync" + "time" + + "cloud.google.com/go/iam" + "github.com/golang/protobuf/proto" + gax "github.com/googleapis/gax-go/v2" + "go.opencensus.io/stats" + "go.opencensus.io/tag" + "google.golang.org/api/support/bundler" + pb "google.golang.org/genproto/googleapis/pubsub/v1" + fmpb "google.golang.org/genproto/protobuf/field_mask" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +const ( + // MaxPublishRequestCount is the maximum number of messages that can be in + // a single publish request, as defined by the PubSub service. + MaxPublishRequestCount = 1000 + + // MaxPublishRequestBytes is the maximum size of a single publish request + // in bytes, as defined by the PubSub service. + MaxPublishRequestBytes = 1e7 +) + +// ErrOversizedMessage indicates that a message's size exceeds MaxPublishRequestBytes. +var ErrOversizedMessage = bundler.ErrOversizedItem + +// Topic is a reference to a PubSub topic. +// +// The methods of Topic are safe for use by multiple goroutines. +type Topic struct { + c *Client + // The fully qualified identifier for the topic, in the format "projects//topics/" + name string + + // Settings for publishing messages. All changes must be made before the + // first call to Publish. The default is DefaultPublishSettings. + PublishSettings PublishSettings + + mu sync.RWMutex + stopped bool + bundler *bundler.Bundler +} + +// PublishSettings control the bundling of published messages. 
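// --- Editorial sketch (not part of the vendored upstream file) ---------------
// PublishSettings (defined just below) control how Publish batches messages.
// A minimal sketch from an importing package's point of view, assuming imports
// "context", "time" and "cloud.google.com/go/pubsub", a placeholder topic ID,
// and Topic.Publish / Topic.Stop / PublishResult.Get as defined elsewhere in
// this package rather than in the excerpt shown here.
func examplePublish(ctx context.Context, client *pubsub.Client) error {
	topic := client.Topic("my-topic")
	topic.PublishSettings.DelayThreshold = 50 * time.Millisecond // flush a batch after 50ms...
	topic.PublishSettings.CountThreshold = 200                   // ...or once 200 messages are buffered
	defer topic.Stop() // flush pending messages and stop the background goroutines

	res := topic.Publish(ctx, &pubsub.Message{Data: []byte("hello")})
	id, err := res.Get(ctx) // blocks until the server has accepted (or rejected) the message
	if err != nil {
		return err
	}
	_ = id // server-assigned message ID
	return nil
}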
+type PublishSettings struct { + + // Publish a non-empty batch after this delay has passed. + DelayThreshold time.Duration + + // Publish a batch when it has this many messages. The maximum is + // MaxPublishRequestCount. + CountThreshold int + + // Publish a batch when its size in bytes reaches this value. + ByteThreshold int + + // The number of goroutines that invoke the Publish RPC concurrently. + // + // Defaults to a multiple of GOMAXPROCS. + NumGoroutines int + + // The maximum time that the client will attempt to publish a bundle of messages. + Timeout time.Duration + + // The maximum number of bytes that the Bundler will keep in memory before + // returning ErrOverflow. + // + // Defaults to DefaultPublishSettings.BufferedByteLimit. + BufferedByteLimit int +} + +// DefaultPublishSettings holds the default values for topics' PublishSettings. +var DefaultPublishSettings = PublishSettings{ + DelayThreshold: 10 * time.Millisecond, + CountThreshold: 100, + ByteThreshold: 1e6, + Timeout: 60 * time.Second, + // By default, limit the bundler to 10 times the max message size. The number 10 is + // chosen as a reasonable amount of messages in the worst case whilst still + // capping the number to a low enough value to not OOM users. + BufferedByteLimit: 10 * MaxPublishRequestBytes, +} + +// CreateTopic creates a new topic. +// +// The specified topic ID must start with a letter, and contain only letters +// ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.), +// tildes (~), plus (+) or percent signs (%). It must be between 3 and 255 +// characters in length, and must not start with "goog". For more information, +// see: https://cloud.google.com/pubsub/docs/admin#resource_names +// +// If the topic already exists an error will be returned. +func (c *Client) CreateTopic(ctx context.Context, topicID string) (*Topic, error) { + t := c.Topic(topicID) + _, err := c.pubc.CreateTopic(ctx, &pb.Topic{Name: t.name}) + if err != nil { + return nil, err + } + return t, nil +} + +// CreateTopicWithConfig creates a topic from TopicConfig. +// +// The specified topic ID must start with a letter, and contain only letters +// ([A-Za-z]), numbers ([0-9]), dashes (-), underscores (_), periods (.), +// tildes (~), plus (+) or percent signs (%). It must be between 3 and 255 +// characters in length, and must not start with "goog". For more information, +// see: https://cloud.google.com/pubsub/docs/admin#resource_names. +// +// If the topic already exists, an error will be returned. +func (c *Client) CreateTopicWithConfig(ctx context.Context, topicID string, tc *TopicConfig) (*Topic, error) { + t := c.Topic(topicID) + _, err := c.pubc.CreateTopic(ctx, &pb.Topic{ + Name: t.name, + Labels: tc.Labels, + MessageStoragePolicy: messageStoragePolicyToProto(&tc.MessageStoragePolicy), + KmsKeyName: tc.KMSKeyName, + }) + if err != nil { + return nil, err + } + return t, nil +} + +// Topic creates a reference to a topic in the client's project. +// +// If a Topic's Publish method is called, it has background goroutines +// associated with it. Clean them up by calling Topic.Stop. +// +// Avoid creating many Topic instances if you use them to publish. +func (c *Client) Topic(id string) *Topic { + return c.TopicInProject(id, c.projectID) +} + +// TopicInProject creates a reference to a topic in the given project. +// +// If a Topic's Publish method is called, it has background goroutines +// associated with it. Clean them up by calling Topic.Stop. 
+// +// Avoid creating many Topic instances if you use them to publish. +func (c *Client) TopicInProject(id, projectID string) *Topic { + return newTopic(c, fmt.Sprintf("projects/%s/topics/%s", projectID, id)) +} + +func newTopic(c *Client, name string) *Topic { + return &Topic{ + c: c, + name: name, + PublishSettings: DefaultPublishSettings, + } +} + +// TopicConfig describes the configuration of a topic. +type TopicConfig struct { + // The set of labels for the topic. + Labels map[string]string + + // The topic's message storage policy. + MessageStoragePolicy MessageStoragePolicy + + // The name of the Cloud KMS key to be used to protect access to messages + // published to this topic, in the format + // "projects/P/locations/L/keyRings/R/cryptoKeys/K". + KMSKeyName string +} + +// TopicConfigToUpdate describes how to update a topic. +type TopicConfigToUpdate struct { + // If non-nil, the current set of labels is completely + // replaced by the new set. + Labels map[string]string + + // If non-nil, the existing policy (containing the list of regions) + // is completely replaced by the new policy. + // + // Use the zero value &MessageStoragePolicy{} to reset the topic back to + // using the organization's Resource Location Restriction policy. + // + // If nil, the policy remains unchanged. + // + // This field has beta status. It is not subject to the stability guarantee + // and may change. + MessageStoragePolicy *MessageStoragePolicy +} + +func protoToTopicConfig(pbt *pb.Topic) TopicConfig { + return TopicConfig{ + Labels: pbt.Labels, + MessageStoragePolicy: protoToMessageStoragePolicy(pbt.MessageStoragePolicy), + KMSKeyName: pbt.KmsKeyName, + } +} + +// MessageStoragePolicy constrains how messages published to the topic may be stored. It +// is determined when the topic is created based on the policy configured at +// the project level. +type MessageStoragePolicy struct { + // AllowedPersistenceRegions is the list of GCP regions where messages that are published + // to the topic may be persisted in storage. Messages published by publishers running in + // non-allowed GCP regions (or running outside of GCP altogether) will be + // routed for storage in one of the allowed regions. + // + // If empty, it indicates a misconfiguration at the project or organization level, which + // will result in all Publish operations failing. This field cannot be empty in updates. + // + // If nil, then the policy is not defined on a topic level. When used in updates, it resets + // the regions back to the organization level Resource Location Restriction policy. + // + // For more information, see + // https://cloud.google.com/pubsub/docs/resource-location-restriction#pubsub-storage-locations. + AllowedPersistenceRegions []string +} + +func protoToMessageStoragePolicy(msp *pb.MessageStoragePolicy) MessageStoragePolicy { + if msp == nil { + return MessageStoragePolicy{} + } + return MessageStoragePolicy{AllowedPersistenceRegions: msp.AllowedPersistenceRegions} +} + +func messageStoragePolicyToProto(msp *MessageStoragePolicy) *pb.MessageStoragePolicy { + if msp == nil || msp.AllowedPersistenceRegions == nil { + return nil + } + return &pb.MessageStoragePolicy{AllowedPersistenceRegions: msp.AllowedPersistenceRegions} +} + +// Config returns the TopicConfig for the topic. 
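+//
+// A sketch of reading the configuration and then replacing the labels
+// (error handling elided; topic and ctx are placeholders):
+//
+//	cfg, _ := topic.Config(ctx)
+//	fmt.Println(cfg.Labels, cfg.KMSKeyName)
+//	cfg2, _ := topic.Update(ctx, TopicConfigToUpdate{
+//		Labels: map[string]string{"team": "ingest"},
+//	})
+//	fmt.Println(cfg2.Labels)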
+func (t *Topic) Config(ctx context.Context) (TopicConfig, error) { + pbt, err := t.c.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: t.name}) + if err != nil { + return TopicConfig{}, err + } + return protoToTopicConfig(pbt), nil +} + +// Update changes an existing topic according to the fields set in cfg. It returns +// the new TopicConfig. +func (t *Topic) Update(ctx context.Context, cfg TopicConfigToUpdate) (TopicConfig, error) { + req := t.updateRequest(cfg) + if len(req.UpdateMask.Paths) == 0 { + return TopicConfig{}, errors.New("pubsub: UpdateTopic call with nothing to update") + } + rpt, err := t.c.pubc.UpdateTopic(ctx, req) + if err != nil { + return TopicConfig{}, err + } + return protoToTopicConfig(rpt), nil +} + +func (t *Topic) updateRequest(cfg TopicConfigToUpdate) *pb.UpdateTopicRequest { + pt := &pb.Topic{Name: t.name} + var paths []string + if cfg.Labels != nil { + pt.Labels = cfg.Labels + paths = append(paths, "labels") + } + if cfg.MessageStoragePolicy != nil { + pt.MessageStoragePolicy = messageStoragePolicyToProto(cfg.MessageStoragePolicy) + paths = append(paths, "message_storage_policy") + } + return &pb.UpdateTopicRequest{ + Topic: pt, + UpdateMask: &fmpb.FieldMask{Paths: paths}, + } +} + +// Topics returns an iterator which returns all of the topics for the client's project. +func (c *Client) Topics(ctx context.Context) *TopicIterator { + it := c.pubc.ListTopics(ctx, &pb.ListTopicsRequest{Project: c.fullyQualifiedProjectName()}) + return &TopicIterator{ + c: c, + next: func() (string, error) { + topic, err := it.Next() + if err != nil { + return "", err + } + return topic.Name, nil + }, + } +} + +// TopicIterator is an iterator that returns a series of topics. +type TopicIterator struct { + c *Client + next func() (string, error) +} + +// Next returns the next topic. If there are no more topics, iterator.Done will be returned. +func (tps *TopicIterator) Next() (*Topic, error) { + topicName, err := tps.next() + if err != nil { + return nil, err + } + return newTopic(tps.c, topicName), nil +} + +// ID returns the unique identifier of the topic within its project. +func (t *Topic) ID() string { + slash := strings.LastIndex(t.name, "/") + if slash == -1 { + // name is not a fully-qualified name. + panic("bad topic name") + } + return t.name[slash+1:] +} + +// String returns the printable globally unique name for the topic. +func (t *Topic) String() string { + return t.name +} + +// Delete deletes the topic. +func (t *Topic) Delete(ctx context.Context) error { + return t.c.pubc.DeleteTopic(ctx, &pb.DeleteTopicRequest{Topic: t.name}) +} + +// Exists reports whether the topic exists on the server. +func (t *Topic) Exists(ctx context.Context) (bool, error) { + if t.name == "_deleted-topic_" { + return false, nil + } + _, err := t.c.pubc.GetTopic(ctx, &pb.GetTopicRequest{Topic: t.name}) + if err == nil { + return true, nil + } + if status.Code(err) == codes.NotFound { + return false, nil + } + return false, err +} + +// IAM returns the topic's IAM handle. +func (t *Topic) IAM() *iam.Handle { + return iam.InternalNewHandle(t.c.pubc.Connection(), t.name) +} + +// Subscriptions returns an iterator which returns the subscriptions for this topic. +// +// Some of the returned subscriptions may belong to a project other than t. 
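+//
+// A sketch of draining the iterator, assuming SubscriptionIterator.Next
+// follows the same pattern as TopicIterator.Next (iterator.Done comes from
+// google.golang.org/api/iterator; error handling is abbreviated):
+//
+//	it := topic.Subscriptions(ctx)
+//	for {
+//		s, err := it.Next()
+//		if err == iterator.Done {
+//			break
+//		}
+//		if err != nil {
+//			break // handle the error
+//		}
+//		fmt.Println(s)
+//	}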
+func (t *Topic) Subscriptions(ctx context.Context) *SubscriptionIterator { + it := t.c.pubc.ListTopicSubscriptions(ctx, &pb.ListTopicSubscriptionsRequest{ + Topic: t.name, + }) + return &SubscriptionIterator{ + c: t.c, + next: it.Next, + } +} + +var errTopicStopped = errors.New("pubsub: Stop has been called for this topic") + +// Publish publishes msg to the topic asynchronously. Messages are batched and +// sent according to the topic's PublishSettings. Publish never blocks. +// +// Publish returns a non-nil PublishResult which will be ready when the +// message has been sent (or has failed to be sent) to the server. +// +// Publish creates goroutines for batching and sending messages. These goroutines +// need to be stopped by calling t.Stop(). Once stopped, future calls to Publish +// will immediately return a PublishResult with an error. +func (t *Topic) Publish(ctx context.Context, msg *Message) *PublishResult { + // Use a PublishRequest with only the Messages field to calculate the size + // of an individual message. This accurately calculates the size of the + // encoded proto message by accounting for the length of an individual + // PubSubMessage and Data/Attributes field. + // TODO(hongalex): if this turns out to take significant time, try to approximate it. + msg.size = proto.Size(&pb.PublishRequest{ + Messages: []*pb.PubsubMessage{ + { + Data: msg.Data, + Attributes: msg.Attributes, + }, + }, + }) + r := &PublishResult{ready: make(chan struct{})} + t.initBundler() + t.mu.RLock() + defer t.mu.RUnlock() + // TODO(aboulhosn) [from bcmills] consider changing the semantics of bundler to perform this logic so we don't have to do it here + if t.stopped { + r.set("", errTopicStopped) + return r + } + + // TODO(jba) [from bcmills] consider using a shared channel per bundle + // (requires Bundler API changes; would reduce allocations) + err := t.bundler.Add(&bundledMessage{msg, r}, msg.size) + if err != nil { + r.set("", err) + } + return r +} + +// Stop sends all remaining published messages and stop goroutines created for handling +// publishing. Returns once all outstanding messages have been sent or have +// failed to be sent. +func (t *Topic) Stop() { + t.mu.Lock() + noop := t.stopped || t.bundler == nil + t.stopped = true + t.mu.Unlock() + if noop { + return + } + t.bundler.Flush() +} + +// A PublishResult holds the result from a call to Publish. +type PublishResult struct { + ready chan struct{} + serverID string + err error +} + +// Ready returns a channel that is closed when the result is ready. +// When the Ready channel is closed, Get is guaranteed not to block. +func (r *PublishResult) Ready() <-chan struct{} { return r.ready } + +// Get returns the server-generated message ID and/or error result of a Publish call. +// Get blocks until the Publish call completes or the context is done. +func (r *PublishResult) Get(ctx context.Context) (serverID string, err error) { + // If the result is already ready, return it even if the context is done. 
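+	// The first select is non-blocking (it has a default case): it returns the
+	// stored result immediately if Ready has already been closed. Otherwise we
+	// fall through to the second select and block on the context or on Ready.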
+ select { + case <-r.Ready(): + return r.serverID, r.err + default: + } + select { + case <-ctx.Done(): + return "", ctx.Err() + case <-r.Ready(): + return r.serverID, r.err + } +} + +func (r *PublishResult) set(sid string, err error) { + r.serverID = sid + r.err = err + close(r.ready) +} + +type bundledMessage struct { + msg *Message + res *PublishResult +} + +func (t *Topic) initBundler() { + t.mu.RLock() + noop := t.stopped || t.bundler != nil + t.mu.RUnlock() + if noop { + return + } + t.mu.Lock() + defer t.mu.Unlock() + // Must re-check, since we released the lock. + if t.stopped || t.bundler != nil { + return + } + + timeout := t.PublishSettings.Timeout + t.bundler = bundler.NewBundler(&bundledMessage{}, func(items interface{}) { + // TODO(jba): use a context detached from the one passed to NewClient. + ctx := context.TODO() + if timeout != 0 { + var cancel func() + ctx, cancel = context.WithTimeout(ctx, timeout) + defer cancel() + } + t.publishMessageBundle(ctx, items.([]*bundledMessage)) + }) + t.bundler.DelayThreshold = t.PublishSettings.DelayThreshold + t.bundler.BundleCountThreshold = t.PublishSettings.CountThreshold + if t.bundler.BundleCountThreshold > MaxPublishRequestCount { + t.bundler.BundleCountThreshold = MaxPublishRequestCount + } + t.bundler.BundleByteThreshold = t.PublishSettings.ByteThreshold + + bufferedByteLimit := DefaultPublishSettings.BufferedByteLimit + if t.PublishSettings.BufferedByteLimit > 0 { + bufferedByteLimit = t.PublishSettings.BufferedByteLimit + } + t.bundler.BufferedByteLimit = bufferedByteLimit + + // Set the bundler's max size per payload, accounting for topic name's overhead. + t.bundler.BundleByteLimit = MaxPublishRequestBytes - calcFieldSizeString(t.name) + // Unless overridden, allow many goroutines per CPU to call the Publish RPC concurrently. + // The default value was determined via extensive load testing (see the loadtest subdirectory). + if t.PublishSettings.NumGoroutines > 0 { + t.bundler.HandlerLimit = t.PublishSettings.NumGoroutines + } else { + t.bundler.HandlerLimit = 25 * runtime.GOMAXPROCS(0) + } +} + +func (t *Topic) publishMessageBundle(ctx context.Context, bms []*bundledMessage) { + ctx, err := tag.New(ctx, tag.Insert(keyStatus, "OK"), tag.Upsert(keyTopic, t.name)) + if err != nil { + log.Printf("pubsub: cannot create context with tag in publishMessageBundle: %v", err) + } + pbMsgs := make([]*pb.PubsubMessage, len(bms)) + for i, bm := range bms { + pbMsgs[i] = &pb.PubsubMessage{ + Data: bm.msg.Data, + Attributes: bm.msg.Attributes, + } + bm.msg = nil // release bm.msg for GC + } + start := time.Now() + res, err := t.c.pubc.Publish(ctx, &pb.PublishRequest{ + Topic: t.name, + Messages: pbMsgs, + }, gax.WithGRPCOptions(grpc.MaxCallSendMsgSize(maxSendRecvBytes))) + end := time.Now() + if err != nil { + // Update context with error tag for OpenCensus, + // using same stats.Record() call as success case. 
+ ctx, _ = tag.New(ctx, tag.Upsert(keyStatus, "ERROR"), + tag.Upsert(keyError, err.Error())) + } + stats.Record(ctx, + PublishLatency.M(float64(end.Sub(start)/time.Millisecond)), + PublishedMessages.M(int64(len(bms)))) + for i, bm := range bms { + if err != nil { + bm.res.set("", err) + } else { + bm.res.set(res.MessageIds[i], nil) + } + } +} diff --git a/vendor/cloud.google.com/go/pubsub/trace.go b/vendor/cloud.google.com/go/pubsub/trace.go new file mode 100644 index 000000000000..4036a350f981 --- /dev/null +++ b/vendor/cloud.google.com/go/pubsub/trace.go @@ -0,0 +1,217 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pubsub + +import ( + "context" + "log" + "sync" + + "go.opencensus.io/plugin/ocgrpc" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "google.golang.org/api/option" + "google.golang.org/grpc" +) + +func openCensusOptions() []option.ClientOption { + return []option.ClientOption{ + option.WithGRPCDialOption(grpc.WithStatsHandler(&ocgrpc.ClientHandler{})), + } +} + +// The following keys are used to tag requests with a specific topic/subscription ID. +var ( + keyTopic = tag.MustNewKey("topic") + keySubscription = tag.MustNewKey("subscription") +) + +// In the following, errors are used if status is not "OK". +var ( + keyStatus = tag.MustNewKey("status") + keyError = tag.MustNewKey("error") +) + +const statsPrefix = "cloud.google.com/go/pubsub/" + +// The following are measures recorded in publish/subscribe flows. +var ( + // PublishedMessages is a measure of the number of messages published, which may include errors. + // It is EXPERIMENTAL and subject to change or removal without notice. + PublishedMessages = stats.Int64(statsPrefix+"published_messages", "Number of PubSub message published", stats.UnitDimensionless) + + // PublishLatency is a measure of the number of milliseconds it took to publish a bundle, + // which may consist of one or more messages. + // It is EXPERIMENTAL and subject to change or removal without notice. + PublishLatency = stats.Float64(statsPrefix+"publish_roundtrip_latency", "The latency in milliseconds per publish batch", stats.UnitMilliseconds) + + // PullCount is a measure of the number of messages pulled. + // It is EXPERIMENTAL and subject to change or removal without notice. + PullCount = stats.Int64(statsPrefix+"pull_count", "Number of PubSub messages pulled", stats.UnitDimensionless) + + // AckCount is a measure of the number of messages acked. + // It is EXPERIMENTAL and subject to change or removal without notice. + AckCount = stats.Int64(statsPrefix+"ack_count", "Number of PubSub messages acked", stats.UnitDimensionless) + + // NackCount is a measure of the number of messages nacked. + // It is EXPERIMENTAL and subject to change or removal without notice. + NackCount = stats.Int64(statsPrefix+"nack_count", "Number of PubSub messages nacked", stats.UnitDimensionless) + + // ModAckCount is a measure of the number of messages whose ack-deadline was modified. 
+ // It is EXPERIMENTAL and subject to change or removal without notice. + ModAckCount = stats.Int64(statsPrefix+"mod_ack_count", "Number of ack-deadlines modified", stats.UnitDimensionless) + + // ModAckTimeoutCount is a measure of the number ModifyAckDeadline RPCs that timed out. + // It is EXPERIMENTAL and subject to change or removal without notice. + ModAckTimeoutCount = stats.Int64(statsPrefix+"mod_ack_timeout_count", "Number of ModifyAckDeadline RPCs that timed out", stats.UnitDimensionless) + + // StreamOpenCount is a measure of the number of times a streaming-pull stream was opened. + // It is EXPERIMENTAL and subject to change or removal without notice. + StreamOpenCount = stats.Int64(statsPrefix+"stream_open_count", "Number of calls opening a new streaming pull", stats.UnitDimensionless) + + // StreamRetryCount is a measure of the number of times a streaming-pull operation was retried. + // It is EXPERIMENTAL and subject to change or removal without notice. + StreamRetryCount = stats.Int64(statsPrefix+"stream_retry_count", "Number of retries of a stream send or receive", stats.UnitDimensionless) + + // StreamRequestCount is a measure of the number of requests sent on a streaming-pull stream. + // It is EXPERIMENTAL and subject to change or removal without notice. + StreamRequestCount = stats.Int64(statsPrefix+"stream_request_count", "Number gRPC StreamingPull request messages sent", stats.UnitDimensionless) + + // StreamResponseCount is a measure of the number of responses received on a streaming-pull stream. + // It is EXPERIMENTAL and subject to change or removal without notice. + StreamResponseCount = stats.Int64(statsPrefix+"stream_response_count", "Number of gRPC StreamingPull response messages received", stats.UnitDimensionless) +) + +var ( + // PublishedMessagesView is a cumulative sum of PublishedMessages. + // It is EXPERIMENTAL and subject to change or removal without notice. + PublishedMessagesView *view.View + + // PublishLatencyView is a distribution of PublishLatency. + // It is EXPERIMENTAL and subject to change or removal without notice. + PublishLatencyView *view.View + + // PullCountView is a cumulative sum of PullCount. + // It is EXPERIMENTAL and subject to change or removal without notice. + PullCountView *view.View + + // AckCountView is a cumulative sum of AckCount. + // It is EXPERIMENTAL and subject to change or removal without notice. + AckCountView *view.View + + // NackCountView is a cumulative sum of NackCount. + // It is EXPERIMENTAL and subject to change or removal without notice. + NackCountView *view.View + + // ModAckCountView is a cumulative sum of ModAckCount. + // It is EXPERIMENTAL and subject to change or removal without notice. + ModAckCountView *view.View + + // ModAckTimeoutCountView is a cumulative sum of ModAckTimeoutCount. + // It is EXPERIMENTAL and subject to change or removal without notice. + ModAckTimeoutCountView *view.View + + // StreamOpenCountView is a cumulative sum of StreamOpenCount. + // It is EXPERIMENTAL and subject to change or removal without notice. + StreamOpenCountView *view.View + + // StreamRetryCountView is a cumulative sum of StreamRetryCount. + // It is EXPERIMENTAL and subject to change or removal without notice. + StreamRetryCountView *view.View + + // StreamRequestCountView is a cumulative sum of StreamRequestCount. + // It is EXPERIMENTAL and subject to change or removal without notice. + StreamRequestCountView *view.View + + // StreamResponseCountView is a cumulative sum of StreamResponseCount. 
+ // It is EXPERIMENTAL and subject to change or removal without notice. + StreamResponseCountView *view.View +) + +func init() { + PublishedMessagesView = createCountView(stats.Measure(PublishedMessages), keyTopic, keyStatus, keyError) + PublishLatencyView = createDistView(PublishLatency, keyTopic, keyStatus, keyError) + PullCountView = createCountView(PullCount, keySubscription) + AckCountView = createCountView(AckCount, keySubscription) + NackCountView = createCountView(NackCount, keySubscription) + ModAckCountView = createCountView(ModAckCount, keySubscription) + ModAckTimeoutCountView = createCountView(ModAckTimeoutCount, keySubscription) + StreamOpenCountView = createCountView(StreamOpenCount, keySubscription) + StreamRetryCountView = createCountView(StreamRetryCount, keySubscription) + StreamRequestCountView = createCountView(StreamRequestCount, keySubscription) + StreamResponseCountView = createCountView(StreamResponseCount, keySubscription) + + DefaultPublishViews = []*view.View{ + PublishedMessagesView, + PublishLatencyView, + } + + DefaultSubscribeViews = []*view.View{ + PullCountView, + AckCountView, + NackCountView, + ModAckCountView, + ModAckTimeoutCountView, + StreamOpenCountView, + StreamRetryCountView, + StreamRequestCountView, + StreamResponseCountView, + } +} + +// These arrays hold the default OpenCensus views that keep track of publish/subscribe operations. +// It is EXPERIMENTAL and subject to change or removal without notice. +var ( + DefaultPublishViews []*view.View + DefaultSubscribeViews []*view.View +) + +func createCountView(m stats.Measure, keys ...tag.Key) *view.View { + return &view.View{ + Name: m.Name(), + Description: m.Description(), + TagKeys: keys, + Measure: m, + Aggregation: view.Sum(), + } +} + +func createDistView(m stats.Measure, keys ...tag.Key) *view.View { + return &view.View{ + Name: m.Name(), + Description: m.Description(), + TagKeys: keys, + Measure: m, + Aggregation: view.Distribution(0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000), + } +} + +var logOnce sync.Once + +// withSubscriptionKey returns a new context modified with the subscriptionKey tag map. +func withSubscriptionKey(ctx context.Context, subName string) context.Context { + ctx, err := tag.New(ctx, tag.Upsert(keySubscription, subName)) + if err != nil { + logOnce.Do(func() { + log.Printf("pubsub: error creating tag map for 'subscribe' key: %v", err) + }) + } + return ctx +} + +func recordStat(ctx context.Context, m *stats.Int64Measure, n int64) { + stats.Record(ctx, m.M(n)) +} diff --git a/vendor/google.golang.org/api/support/bundler/bundler.go b/vendor/google.golang.org/api/support/bundler/bundler.go new file mode 100644 index 000000000000..9f8237a03da7 --- /dev/null +++ b/vendor/google.golang.org/api/support/bundler/bundler.go @@ -0,0 +1,407 @@ +// Copyright 2016 Google LLC. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package bundler supports bundling (batching) of items. Bundling amortizes an +// action with fixed costs over multiple items. For example, if an API provides +// an RPC that accepts a list of items as input, but clients would prefer +// adding items one at a time, then a Bundler can accept individual items from +// the client and bundle many of them into a single RPC. +// +// This package is experimental and subject to change without notice. 
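+//
+// A minimal usage sketch (the item type and handler body are illustrative):
+//
+//	b := bundler.NewBundler("", func(items interface{}) {
+//		for _, s := range items.([]string) {
+//			fmt.Println(s)
+//		}
+//	})
+//	b.BundleCountThreshold = 10
+//	if err := b.Add("hello", len("hello")); err != nil {
+//		// handle ErrOverflow or ErrOversizedItem
+//	}
+//	b.Flush()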
+package bundler + +import ( + "context" + "errors" + "reflect" + "sync" + "time" + + "golang.org/x/sync/semaphore" +) + +type mode int + +const ( + DefaultDelayThreshold = time.Second + DefaultBundleCountThreshold = 10 + DefaultBundleByteThreshold = 1e6 // 1M + DefaultBufferedByteLimit = 1e9 // 1G +) + +const ( + none mode = iota + add + addWait +) + +var ( + // ErrOverflow indicates that Bundler's stored bytes exceeds its BufferedByteLimit. + ErrOverflow = errors.New("bundler reached buffered byte limit") + + // ErrOversizedItem indicates that an item's size exceeds the maximum bundle size. + ErrOversizedItem = errors.New("item size exceeds bundle byte limit") + + // errMixedMethods indicates that mutually exclusive methods has been + // called subsequently. + errMixedMethods = errors.New("calls to Add and AddWait cannot be mixed") +) + +// A Bundler collects items added to it into a bundle until the bundle +// exceeds a given size, then calls a user-provided function to handle the +// bundle. +// +// The exported fields are only safe to modify prior to the first call to Add +// or AddWait. +type Bundler struct { + // Starting from the time that the first message is added to a bundle, once + // this delay has passed, handle the bundle. The default is DefaultDelayThreshold. + DelayThreshold time.Duration + + // Once a bundle has this many items, handle the bundle. Since only one + // item at a time is added to a bundle, no bundle will exceed this + // threshold, so it also serves as a limit. The default is + // DefaultBundleCountThreshold. + BundleCountThreshold int + + // Once the number of bytes in current bundle reaches this threshold, handle + // the bundle. The default is DefaultBundleByteThreshold. This triggers handling, + // but does not cap the total size of a bundle. + BundleByteThreshold int + + // The maximum size of a bundle, in bytes. Zero means unlimited. + BundleByteLimit int + + // The maximum number of bytes that the Bundler will keep in memory before + // returning ErrOverflow. The default is DefaultBufferedByteLimit. + BufferedByteLimit int + + // The maximum number of handler invocations that can be running at once. + // The default is 1. + HandlerLimit int + + handler func(interface{}) // called to handle a bundle + itemSliceZero reflect.Value // nil (zero value) for slice of items + + mu sync.Mutex // guards access to fields below + flushTimer *time.Timer // implements DelayThreshold + handlerCount int // # of bundles currently being handled (i.e. handler is invoked on them) + sem *semaphore.Weighted // enforces BufferedByteLimit + semOnce sync.Once // guards semaphore initialization + // The current bundle we're adding items to. Not yet in the queue. + // Appended to the queue once the flushTimer fires or the bundle + // thresholds/limits are reached. If curBundle is nil and tail is + // not, we first try to add items to tail. Once tail is full or handled, + // we create a new curBundle for the incoming item. + curBundle *bundle + // The next bundle in the queue to be handled. Nil if the queue is + // empty. + head *bundle + // The last bundle in the queue to be handled. Nil if the queue is + // empty. If curBundle is nil and tail isn't, we attempt to add new + // items to the tail until if becomes full or has been passed to the + // handler. 
+ tail *bundle + curFlush *sync.WaitGroup // counts outstanding bundles since last flush + prevFlush chan bool // signal used to wait for prior flush + + // The first call to Add or AddWait, mode will be add or addWait respectively. + // If there wasn't call yet then mode is none. + mode mode + // TODO: consider alternative queue implementation for head/tail bundle. see: + // https://code-review.googlesource.com/c/google-api-go-client/+/47991/4/support/bundler/bundler.go#74 +} + +// A bundle is a group of items that were added individually and will be passed +// to a handler as a slice. +type bundle struct { + items reflect.Value // slice of T + size int // size in bytes of all items + next *bundle // bundles are handled in order as a linked list queue + flush *sync.WaitGroup // the counter that tracks flush completion +} + +// add appends item to this bundle and increments the total size. It requires +// that b.mu is locked. +func (bu *bundle) add(item interface{}, size int) { + bu.items = reflect.Append(bu.items, reflect.ValueOf(item)) + bu.size += size +} + +// NewBundler creates a new Bundler. +// +// itemExample is a value of the type that will be bundled. For example, if you +// want to create bundles of *Entry, you could pass &Entry{} for itemExample. +// +// handler is a function that will be called on each bundle. If itemExample is +// of type T, the argument to handler is of type []T. handler is always called +// sequentially for each bundle, and never in parallel. +// +// Configure the Bundler by setting its thresholds and limits before calling +// any of its methods. +func NewBundler(itemExample interface{}, handler func(interface{})) *Bundler { + b := &Bundler{ + DelayThreshold: DefaultDelayThreshold, + BundleCountThreshold: DefaultBundleCountThreshold, + BundleByteThreshold: DefaultBundleByteThreshold, + BufferedByteLimit: DefaultBufferedByteLimit, + HandlerLimit: 1, + + handler: handler, + itemSliceZero: reflect.Zero(reflect.SliceOf(reflect.TypeOf(itemExample))), + curFlush: &sync.WaitGroup{}, + } + return b +} + +func (b *Bundler) initSemaphores() { + // Create the semaphores lazily, because the user may set limits + // after NewBundler. + b.semOnce.Do(func() { + b.sem = semaphore.NewWeighted(int64(b.BufferedByteLimit)) + }) +} + +// enqueueCurBundle moves curBundle to the end of the queue. The bundle may be +// handled immediately if we are below HandlerLimit. It requires that b.mu is +// locked. +func (b *Bundler) enqueueCurBundle() { + // We don't require callers to check if there is a pending bundle. It + // may have already been appended to the queue. If so, return early. + if b.curBundle == nil { + return + } + // If we are below the HandlerLimit, the queue must be empty. Handle + // immediately with a new goroutine. + if b.handlerCount < b.HandlerLimit { + b.handlerCount++ + go b.handle(b.curBundle) + } else if b.tail != nil { + // There are bundles on the queue, so append to the end + b.tail.next = b.curBundle + b.tail = b.curBundle + } else { + // The queue is empty, so initialize the queue + b.head = b.curBundle + b.tail = b.curBundle + } + b.curBundle = nil + if b.flushTimer != nil { + b.flushTimer.Stop() + b.flushTimer = nil + } +} + +// setMode sets the state of Bundler's mode. If mode was defined before +// and passed state is different from it then return an error. 
+func (b *Bundler) setMode(m mode) error { + b.mu.Lock() + defer b.mu.Unlock() + if b.mode == m || b.mode == none { + b.mode = m + return nil + } + return errMixedMethods +} + +// canFit returns true if bu can fit an additional item of size bytes based +// on the limits of Bundler b. +func (b *Bundler) canFit(bu *bundle, size int) bool { + return (b.BundleByteLimit <= 0 || bu.size+size <= b.BundleByteLimit) && + (b.BundleCountThreshold <= 0 || bu.items.Len() < b.BundleCountThreshold) +} + +// Add adds item to the current bundle. It marks the bundle for handling and +// starts a new one if any of the thresholds or limits are exceeded. +// The type of item must be assignable to the itemExample parameter of the NewBundler +// method, otherwise there will be a panic. +// +// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then +// the item can never be handled. Add returns ErrOversizedItem in this case. +// +// If adding the item would exceed the maximum memory allowed +// (Bundler.BufferedByteLimit) or an AddWait call is blocked waiting for +// memory, Add returns ErrOverflow. +// +// Add never blocks. +func (b *Bundler) Add(item interface{}, size int) error { + if err := b.setMode(add); err != nil { + return err + } + // If this item exceeds the maximum size of a bundle, + // we can never send it. + if b.BundleByteLimit > 0 && size > b.BundleByteLimit { + return ErrOversizedItem + } + + // If adding this item would exceed our allotted memory + // footprint, we can't accept it. + // (TryAcquire also returns false if anything is waiting on the semaphore, + // so calls to Add and AddWait shouldn't be mixed.) + b.initSemaphores() + if !b.sem.TryAcquire(int64(size)) { + return ErrOverflow + } + + b.mu.Lock() + defer b.mu.Unlock() + return b.add(item, size) +} + +// add adds item to the tail of the bundle queue or curBundle depending on space +// and nil-ness (see inline comments). It marks curBundle for handling (by +// appending it to the queue) if any of the thresholds or limits are exceeded. +// curBundle is lazily initialized. It requires that b.mu is locked. +func (b *Bundler) add(item interface{}, size int) error { + // If we don't have a curBundle, see if we can add to the queue tail. + if b.tail != nil && b.curBundle == nil && b.canFit(b.tail, size) { + b.tail.add(item, size) + return nil + } + + // If we can't fit in the existing curBundle, move it onto the queue. + if b.curBundle != nil && !b.canFit(b.curBundle, size) { + b.enqueueCurBundle() + } + + // Create a curBundle if we don't have one. + if b.curBundle == nil { + b.curFlush.Add(1) + b.curBundle = &bundle{ + items: b.itemSliceZero, + flush: b.curFlush, + } + } + + // Add the item. + b.curBundle.add(item, size) + + // If curBundle is ready for handling, move it to the queue. + if b.curBundle.size >= b.BundleByteThreshold || + b.curBundle.items.Len() == b.BundleCountThreshold { + b.enqueueCurBundle() + } + + // If we created a new bundle and it wasn't immediately handled, set a timer + if b.curBundle != nil && b.flushTimer == nil { + b.flushTimer = time.AfterFunc(b.DelayThreshold, b.tryHandleBundles) + } + + return nil +} + +// tryHandleBundles is the timer callback that handles or queues any current +// bundle after DelayThreshold time, even if the bundle isn't completely full. +func (b *Bundler) tryHandleBundles() { + b.mu.Lock() + b.enqueueCurBundle() + b.mu.Unlock() +} + +// next returns the next bundle that is ready for handling and removes it from +// the internal queue. 
It requires that b.mu is locked. +func (b *Bundler) next() *bundle { + if b.head == nil { + return nil + } + out := b.head + b.head = b.head.next + if b.head == nil { + b.tail = nil + } + out.next = nil + return out +} + +// handle calls the user-specified handler on the given bundle. handle is +// intended to be run as a goroutine. After the handler returns, we update the +// byte total. handle continues processing additional bundles that are ready. +// If no more bundles are ready, the handler count is decremented and the +// goroutine ends. +func (b *Bundler) handle(bu *bundle) { + for bu != nil { + b.handler(bu.items.Interface()) + bu = b.postHandle(bu) + } +} + +func (b *Bundler) postHandle(bu *bundle) *bundle { + b.mu.Lock() + defer b.mu.Unlock() + + b.sem.Release(int64(bu.size)) + bu.flush.Done() + + bu = b.next() + if bu == nil { + b.handlerCount-- + } + return bu +} + +// AddWait adds item to the current bundle. It marks the bundle for handling and +// starts a new one if any of the thresholds or limits are exceeded. +// +// If the item's size exceeds the maximum bundle size (Bundler.BundleByteLimit), then +// the item can never be handled. AddWait returns ErrOversizedItem in this case. +// +// If adding the item would exceed the maximum memory allowed (Bundler.BufferedByteLimit), +// AddWait blocks until space is available or ctx is done. +// +// Calls to Add and AddWait should not be mixed on the same Bundler. +func (b *Bundler) AddWait(ctx context.Context, item interface{}, size int) error { + if err := b.setMode(addWait); err != nil { + return err + } + // If this item exceeds the maximum size of a bundle, + // we can never send it. + if b.BundleByteLimit > 0 && size > b.BundleByteLimit { + return ErrOversizedItem + } + // If adding this item would exceed our allotted memory footprint, block + // until space is available. The semaphore is FIFO, so there will be no + // starvation. + b.initSemaphores() + if err := b.sem.Acquire(ctx, int64(size)); err != nil { + return err + } + + b.mu.Lock() + defer b.mu.Unlock() + return b.add(item, size) +} + +// Flush invokes the handler for all remaining items in the Bundler and waits +// for it to return. +func (b *Bundler) Flush() { + b.mu.Lock() + + // If a curBundle is pending, move it to the queue. + b.enqueueCurBundle() + + // Store a pointer to the WaitGroup that counts outstanding bundles + // in the current flush and create a new one to track the next flush. + wg := b.curFlush + b.curFlush = &sync.WaitGroup{} + + // Flush must wait for all prior, outstanding flushes to complete. + // We use a channel to communicate completion between each flush in + // the sequence. + prev := b.prevFlush + next := make(chan bool) + b.prevFlush = next + + b.mu.Unlock() + + // Wait until the previous flush is finished. + if prev != nil { + <-prev + } + + // Wait until this flush is finished. + wg.Wait() + + // Allow the next flush to finish. + close(next) +} diff --git a/vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go b/vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go new file mode 100644 index 000000000000..3bba438e15f8 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/pubsub/v1/pubsub.pb.go @@ -0,0 +1,5912 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.13.0 +// source: google/pubsub/v1/pubsub.proto + +package pubsub + +import ( + context "context" + reflect "reflect" + sync "sync" + + proto "github.com/golang/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + durationpb "google.golang.org/protobuf/types/known/durationpb" + emptypb "google.golang.org/protobuf/types/known/emptypb" + fieldmaskpb "google.golang.org/protobuf/types/known/fieldmaskpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// A policy constraining the storage of messages published to the topic. +type MessageStoragePolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A list of IDs of GCP regions where messages that are published to the topic + // may be persisted in storage. Messages published by publishers running in + // non-allowed GCP regions (or running outside of GCP altogether) will be + // routed for storage in one of the allowed regions. An empty list means that + // no regions are allowed, and is not a valid configuration. + AllowedPersistenceRegions []string `protobuf:"bytes,1,rep,name=allowed_persistence_regions,json=allowedPersistenceRegions,proto3" json:"allowed_persistence_regions,omitempty"` +} + +func (x *MessageStoragePolicy) Reset() { + *x = MessageStoragePolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *MessageStoragePolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*MessageStoragePolicy) ProtoMessage() {} + +func (x *MessageStoragePolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use MessageStoragePolicy.ProtoReflect.Descriptor instead. 
+func (*MessageStoragePolicy) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{0} +} + +func (x *MessageStoragePolicy) GetAllowedPersistenceRegions() []string { + if x != nil { + return x.AllowedPersistenceRegions + } + return nil +} + +// A topic resource. +type Topic struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the topic. It must have the format + // `"projects/{project}/topics/{topic}"`. `{topic}` must start with a letter, + // and contain only letters (`[A-Za-z]`), numbers (`[0-9]`), dashes (`-`), + // underscores (`_`), periods (`.`), tildes (`~`), plus (`+`) or percent + // signs (`%`). It must be between 3 and 255 characters in length, and it + // must not start with `"goog"`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // See [Creating and managing labels] + // (https://cloud.google.com/pubsub/docs/labels). + Labels map[string]string `protobuf:"bytes,2,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Policy constraining the set of Google Cloud Platform regions where messages + // published to the topic may be stored. If not present, then no constraints + // are in effect. + MessageStoragePolicy *MessageStoragePolicy `protobuf:"bytes,3,opt,name=message_storage_policy,json=messageStoragePolicy,proto3" json:"message_storage_policy,omitempty"` + // The resource name of the Cloud KMS CryptoKey to be used to protect access + // to messages published on this topic. + // + // The expected format is `projects/*/locations/*/keyRings/*/cryptoKeys/*`. + KmsKeyName string `protobuf:"bytes,5,opt,name=kms_key_name,json=kmsKeyName,proto3" json:"kms_key_name,omitempty"` +} + +func (x *Topic) Reset() { + *x = Topic{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Topic) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Topic) ProtoMessage() {} + +func (x *Topic) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Topic.ProtoReflect.Descriptor instead. +func (*Topic) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{1} +} + +func (x *Topic) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Topic) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Topic) GetMessageStoragePolicy() *MessageStoragePolicy { + if x != nil { + return x.MessageStoragePolicy + } + return nil +} + +func (x *Topic) GetKmsKeyName() string { + if x != nil { + return x.KmsKeyName + } + return "" +} + +// A message that is published by publishers and consumed by subscribers. The +// message must contain either a non-empty data field or at least one attribute. +// Note that client libraries represent this object differently +// depending on the language. See the corresponding [client library +// documentation](https://cloud.google.com/pubsub/docs/reference/libraries) for +// more information. 
See [quotas and limits] +// (https://cloud.google.com/pubsub/quotas) for more information about message +// limits. +type PubsubMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The message data field. If this field is empty, the message must contain + // at least one attribute. + Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` + // Attributes for this message. If this field is empty, the message must + // contain non-empty data. This can be used to filter messages on the + // subscription. + Attributes map[string]string `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // ID of this message, assigned by the server when the message is published. + // Guaranteed to be unique within the topic. This value may be read by a + // subscriber that receives a `PubsubMessage` via a `Pull` call or a push + // delivery. It must not be populated by the publisher in a `Publish` call. + MessageId string `protobuf:"bytes,3,opt,name=message_id,json=messageId,proto3" json:"message_id,omitempty"` + // The time at which the message was published, populated by the server when + // it receives the `Publish` call. It must not be populated by the + // publisher in a `Publish` call. + PublishTime *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=publish_time,json=publishTime,proto3" json:"publish_time,omitempty"` + // If non-empty, identifies related messages for which publish order should be + // respected. If a `Subscription` has `enable_message_ordering` set to `true`, + // messages published with the same non-empty `ordering_key` value will be + // delivered to subscribers in the order in which they are received by the + // Pub/Sub system. All `PubsubMessage`s published in a given `PublishRequest` + // must specify the same `ordering_key` value. + OrderingKey string `protobuf:"bytes,5,opt,name=ordering_key,json=orderingKey,proto3" json:"ordering_key,omitempty"` +} + +func (x *PubsubMessage) Reset() { + *x = PubsubMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PubsubMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PubsubMessage) ProtoMessage() {} + +func (x *PubsubMessage) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PubsubMessage.ProtoReflect.Descriptor instead. 
+func (*PubsubMessage) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{2} +} + +func (x *PubsubMessage) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *PubsubMessage) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *PubsubMessage) GetMessageId() string { + if x != nil { + return x.MessageId + } + return "" +} + +func (x *PubsubMessage) GetPublishTime() *timestamppb.Timestamp { + if x != nil { + return x.PublishTime + } + return nil +} + +func (x *PubsubMessage) GetOrderingKey() string { + if x != nil { + return x.OrderingKey + } + return "" +} + +// Request for the GetTopic method. +type GetTopicRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the topic to get. + // Format is `projects/{project}/topics/{topic}`. + Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` +} + +func (x *GetTopicRequest) Reset() { + *x = GetTopicRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetTopicRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetTopicRequest) ProtoMessage() {} + +func (x *GetTopicRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetTopicRequest.ProtoReflect.Descriptor instead. +func (*GetTopicRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{3} +} + +func (x *GetTopicRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +// Request for the UpdateTopic method. +type UpdateTopicRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The updated topic object. + Topic *Topic `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` + // Required. Indicates which fields in the provided topic to update. Must be + // specified and non-empty. Note that if `update_mask` contains + // "message_storage_policy" but the `message_storage_policy` is not set in + // the `topic` provided above, then the updated value is determined by the + // policy configured at the project or organization level. 
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateTopicRequest) Reset() { + *x = UpdateTopicRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateTopicRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateTopicRequest) ProtoMessage() {} + +func (x *UpdateTopicRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateTopicRequest.ProtoReflect.Descriptor instead. +func (*UpdateTopicRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{4} +} + +func (x *UpdateTopicRequest) GetTopic() *Topic { + if x != nil { + return x.Topic + } + return nil +} + +func (x *UpdateTopicRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Request for the Publish method. +type PublishRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The messages in the request will be published on this topic. + // Format is `projects/{project}/topics/{topic}`. + Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` + // Required. The messages to publish. + Messages []*PubsubMessage `protobuf:"bytes,2,rep,name=messages,proto3" json:"messages,omitempty"` +} + +func (x *PublishRequest) Reset() { + *x = PublishRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishRequest) ProtoMessage() {} + +func (x *PublishRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishRequest.ProtoReflect.Descriptor instead. +func (*PublishRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{5} +} + +func (x *PublishRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *PublishRequest) GetMessages() []*PubsubMessage { + if x != nil { + return x.Messages + } + return nil +} + +// Response for the `Publish` method. +type PublishResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The server-assigned ID of each published message, in the same order as + // the messages in the request. IDs are guaranteed to be unique within + // the topic. 
+ MessageIds []string `protobuf:"bytes,1,rep,name=message_ids,json=messageIds,proto3" json:"message_ids,omitempty"` +} + +func (x *PublishResponse) Reset() { + *x = PublishResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PublishResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PublishResponse) ProtoMessage() {} + +func (x *PublishResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PublishResponse.ProtoReflect.Descriptor instead. +func (*PublishResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{6} +} + +func (x *PublishResponse) GetMessageIds() []string { + if x != nil { + return x.MessageIds + } + return nil +} + +// Request for the `ListTopics` method. +type ListTopicsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the project in which to list topics. + // Format is `projects/{project-id}`. + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` + // Maximum number of topics to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // The value returned by the last `ListTopicsResponse`; indicates that this is + // a continuation of a prior `ListTopics` call, and that the system should + // return the next page of data. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListTopicsRequest) Reset() { + *x = ListTopicsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListTopicsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTopicsRequest) ProtoMessage() {} + +func (x *ListTopicsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[7] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTopicsRequest.ProtoReflect.Descriptor instead. +func (*ListTopicsRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{7} +} + +func (x *ListTopicsRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *ListTopicsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListTopicsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// Response for the `ListTopics` method. +type ListTopicsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resulting topics. 
+ Topics []*Topic `protobuf:"bytes,1,rep,name=topics,proto3" json:"topics,omitempty"` + // If not empty, indicates that there may be more topics that match the + // request; this value should be passed in a new `ListTopicsRequest`. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListTopicsResponse) Reset() { + *x = ListTopicsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListTopicsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTopicsResponse) ProtoMessage() {} + +func (x *ListTopicsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[8] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTopicsResponse.ProtoReflect.Descriptor instead. +func (*ListTopicsResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{8} +} + +func (x *ListTopicsResponse) GetTopics() []*Topic { + if x != nil { + return x.Topics + } + return nil +} + +func (x *ListTopicsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request for the `ListTopicSubscriptions` method. +type ListTopicSubscriptionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the topic that subscriptions are attached to. + // Format is `projects/{project}/topics/{topic}`. + Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` + // Maximum number of subscription names to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // The value returned by the last `ListTopicSubscriptionsResponse`; indicates + // that this is a continuation of a prior `ListTopicSubscriptions` call, and + // that the system should return the next page of data. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListTopicSubscriptionsRequest) Reset() { + *x = ListTopicSubscriptionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListTopicSubscriptionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTopicSubscriptionsRequest) ProtoMessage() {} + +func (x *ListTopicSubscriptionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[9] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTopicSubscriptionsRequest.ProtoReflect.Descriptor instead. 
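// A minimal sketch (not part of the generated file) of how the PublishRequest
// and PublishResponse messages above are used through the vendored
// cloud.google.com/go/pubsub/apiv1 client. The project and topic names are
// hypothetical.
package main

import (
	"context"
	"fmt"
	"log"

	pubsub "cloud.google.com/go/pubsub/apiv1"
	pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
)

func main() {
	ctx := context.Background()

	c, err := pubsub.NewPublisherClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Publish takes a PublishRequest and returns the server-assigned message IDs
	// in a PublishResponse, mirroring the message definitions above.
	resp, err := c.Publish(ctx, &pubsubpb.PublishRequest{
		Topic: "projects/my-project/topics/my-topic", // hypothetical resource name
		Messages: []*pubsubpb.PubsubMessage{
			{Data: []byte("hello")},
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.MessageIds)
}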
+func (*ListTopicSubscriptionsRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{9} +} + +func (x *ListTopicSubscriptionsRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *ListTopicSubscriptionsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListTopicSubscriptionsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// Response for the `ListTopicSubscriptions` method. +type ListTopicSubscriptionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The names of subscriptions attached to the topic specified in the request. + Subscriptions []string `protobuf:"bytes,1,rep,name=subscriptions,proto3" json:"subscriptions,omitempty"` + // If not empty, indicates that there may be more subscriptions that match + // the request; this value should be passed in a new + // `ListTopicSubscriptionsRequest` to get more subscriptions. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListTopicSubscriptionsResponse) Reset() { + *x = ListTopicSubscriptionsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListTopicSubscriptionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTopicSubscriptionsResponse) ProtoMessage() {} + +func (x *ListTopicSubscriptionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[10] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTopicSubscriptionsResponse.ProtoReflect.Descriptor instead. +func (*ListTopicSubscriptionsResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{10} +} + +func (x *ListTopicSubscriptionsResponse) GetSubscriptions() []string { + if x != nil { + return x.Subscriptions + } + return nil +} + +func (x *ListTopicSubscriptionsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request for the `ListTopicSnapshots` method. +type ListTopicSnapshotsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the topic that snapshots are attached to. + // Format is `projects/{project}/topics/{topic}`. + Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` + // Maximum number of snapshot names to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // The value returned by the last `ListTopicSnapshotsResponse`; indicates + // that this is a continuation of a prior `ListTopicSnapshots` call, and + // that the system should return the next page of data. 
+ PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListTopicSnapshotsRequest) Reset() { + *x = ListTopicSnapshotsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListTopicSnapshotsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTopicSnapshotsRequest) ProtoMessage() {} + +func (x *ListTopicSnapshotsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[11] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTopicSnapshotsRequest.ProtoReflect.Descriptor instead. +func (*ListTopicSnapshotsRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{11} +} + +func (x *ListTopicSnapshotsRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *ListTopicSnapshotsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListTopicSnapshotsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// Response for the `ListTopicSnapshots` method. +type ListTopicSnapshotsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The names of the snapshots that match the request. + Snapshots []string `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"` + // If not empty, indicates that there may be more snapshots that match + // the request; this value should be passed in a new + // `ListTopicSnapshotsRequest` to get more snapshots. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListTopicSnapshotsResponse) Reset() { + *x = ListTopicSnapshotsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListTopicSnapshotsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListTopicSnapshotsResponse) ProtoMessage() {} + +func (x *ListTopicSnapshotsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[12] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListTopicSnapshotsResponse.ProtoReflect.Descriptor instead. +func (*ListTopicSnapshotsResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{12} +} + +func (x *ListTopicSnapshotsResponse) GetSnapshots() []string { + if x != nil { + return x.Snapshots + } + return nil +} + +func (x *ListTopicSnapshotsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request for the `DeleteTopic` method. +type DeleteTopicRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. 
Name of the topic to delete. + // Format is `projects/{project}/topics/{topic}`. + Topic string `protobuf:"bytes,1,opt,name=topic,proto3" json:"topic,omitempty"` +} + +func (x *DeleteTopicRequest) Reset() { + *x = DeleteTopicRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteTopicRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteTopicRequest) ProtoMessage() {} + +func (x *DeleteTopicRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteTopicRequest.ProtoReflect.Descriptor instead. +func (*DeleteTopicRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{13} +} + +func (x *DeleteTopicRequest) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +// Request for the DetachSubscription method. +type DetachSubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The subscription to detach. + // Format is `projects/{project}/subscriptions/{subscription}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` +} + +func (x *DetachSubscriptionRequest) Reset() { + *x = DetachSubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DetachSubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DetachSubscriptionRequest) ProtoMessage() {} + +func (x *DetachSubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[14] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DetachSubscriptionRequest.ProtoReflect.Descriptor instead. +func (*DetachSubscriptionRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{14} +} + +func (x *DetachSubscriptionRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +// Response for the DetachSubscription method. +// Reserved for future use. 
+type DetachSubscriptionResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *DetachSubscriptionResponse) Reset() { + *x = DetachSubscriptionResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DetachSubscriptionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DetachSubscriptionResponse) ProtoMessage() {} + +func (x *DetachSubscriptionResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[15] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DetachSubscriptionResponse.ProtoReflect.Descriptor instead. +func (*DetachSubscriptionResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{15} +} + +// A subscription resource. +type Subscription struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the subscription. It must have the format + // `"projects/{project}/subscriptions/{subscription}"`. `{subscription}` must + // start with a letter, and contain only letters (`[A-Za-z]`), numbers + // (`[0-9]`), dashes (`-`), underscores (`_`), periods (`.`), tildes (`~`), + // plus (`+`) or percent signs (`%`). It must be between 3 and 255 characters + // in length, and it must not start with `"goog"`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The name of the topic from which this subscription is receiving + // messages. Format is `projects/{project}/topics/{topic}`. The value of this + // field will be `_deleted-topic_` if the topic has been deleted. + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + // If push delivery is used with this subscription, this field is + // used to configure it. An empty `pushConfig` signifies that the subscriber + // will pull and ack messages using API methods. + PushConfig *PushConfig `protobuf:"bytes,4,opt,name=push_config,json=pushConfig,proto3" json:"push_config,omitempty"` + // The approximate amount of time (on a best-effort basis) Pub/Sub waits for + // the subscriber to acknowledge receipt before resending the message. In the + // interval after the message is delivered and before it is acknowledged, it + // is considered to be outstanding. During that time period, the + // message will not be redelivered (on a best-effort basis). + // + // For pull subscriptions, this value is used as the initial value for the ack + // deadline. To override this value for a given message, call + // `ModifyAckDeadline` with the corresponding `ack_id` if using + // non-streaming pull or send the `ack_id` in a + // `StreamingModifyAckDeadlineRequest` if using streaming pull. + // The minimum custom deadline you can specify is 10 seconds. + // The maximum custom deadline you can specify is 600 seconds (10 minutes). + // If this parameter is 0, a default value of 10 seconds is used. + // + // For push delivery, this value is also used to set the request timeout for + // the call to the push endpoint. 
+ // + // If the subscriber never acknowledges the message, the Pub/Sub + // system will eventually redeliver the message. + AckDeadlineSeconds int32 `protobuf:"varint,5,opt,name=ack_deadline_seconds,json=ackDeadlineSeconds,proto3" json:"ack_deadline_seconds,omitempty"` + // Indicates whether to retain acknowledged messages. If true, then + // messages are not expunged from the subscription's backlog, even if they are + // acknowledged, until they fall out of the `message_retention_duration` + // window. This must be true if you would like to [Seek to a timestamp] + // (https://cloud.google.com/pubsub/docs/replay-overview#seek_to_a_time). + RetainAckedMessages bool `protobuf:"varint,7,opt,name=retain_acked_messages,json=retainAckedMessages,proto3" json:"retain_acked_messages,omitempty"` + // How long to retain unacknowledged messages in the subscription's backlog, + // from the moment a message is published. + // If `retain_acked_messages` is true, then this also configures the retention + // of acknowledged messages, and thus configures how far back in time a `Seek` + // can be done. Defaults to 7 days. Cannot be more than 7 days or less than 10 + // minutes. + MessageRetentionDuration *durationpb.Duration `protobuf:"bytes,8,opt,name=message_retention_duration,json=messageRetentionDuration,proto3" json:"message_retention_duration,omitempty"` + // See Creating and + // managing labels. + Labels map[string]string `protobuf:"bytes,9,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // If true, messages published with the same `ordering_key` in `PubsubMessage` + // will be delivered to the subscribers in the order in which they + // are received by the Pub/Sub system. Otherwise, they may be delivered in + // any order. + EnableMessageOrdering bool `protobuf:"varint,10,opt,name=enable_message_ordering,json=enableMessageOrdering,proto3" json:"enable_message_ordering,omitempty"` + // A policy that specifies the conditions for this subscription's expiration. + // A subscription is considered active as long as any connected subscriber is + // successfully consuming messages from the subscription or is issuing + // operations on the subscription. If `expiration_policy` is not set, a + // *default policy* with `ttl` of 31 days will be used. The minimum allowed + // value for `expiration_policy.ttl` is 1 day. + ExpirationPolicy *ExpirationPolicy `protobuf:"bytes,11,opt,name=expiration_policy,json=expirationPolicy,proto3" json:"expiration_policy,omitempty"` + // An expression written in the Pub/Sub [filter + // language](https://cloud.google.com/pubsub/docs/filtering). If non-empty, + // then only `PubsubMessage`s whose `attributes` field matches the filter are + // delivered on this subscription. If empty, then no messages are filtered + // out. + Filter string `protobuf:"bytes,12,opt,name=filter,proto3" json:"filter,omitempty"` + // A policy that specifies the conditions for dead lettering messages in + // this subscription. If dead_letter_policy is not set, dead lettering + // is disabled. + // + // The Cloud Pub/Sub service account associated with this subscriptions's + // parent project (i.e., + // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have + // permission to Acknowledge() messages on this subscription. 
+ DeadLetterPolicy *DeadLetterPolicy `protobuf:"bytes,13,opt,name=dead_letter_policy,json=deadLetterPolicy,proto3" json:"dead_letter_policy,omitempty"` + // A policy that specifies how Pub/Sub retries message delivery for this + // subscription. + // + // If not set, the default retry policy is applied. This generally implies + // that messages will be retried as soon as possible for healthy subscribers. + // RetryPolicy will be triggered on NACKs or acknowledgement deadline + // exceeded events for a given message. + RetryPolicy *RetryPolicy `protobuf:"bytes,14,opt,name=retry_policy,json=retryPolicy,proto3" json:"retry_policy,omitempty"` + // Indicates whether the subscription is detached from its topic. Detached + // subscriptions don't receive messages from their topic and don't retain any + // backlog. `Pull` and `StreamingPull` requests will return + // FAILED_PRECONDITION. If the subscription is a push subscription, pushes to + // the endpoint will not be made. + Detached bool `protobuf:"varint,15,opt,name=detached,proto3" json:"detached,omitempty"` +} + +func (x *Subscription) Reset() { + *x = Subscription{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Subscription) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Subscription) ProtoMessage() {} + +func (x *Subscription) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[16] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Subscription.ProtoReflect.Descriptor instead. 
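// A minimal sketch (not part of the generated file) of consuming messages from
// a Subscription like the one described above, using the higher-level
// cloud.google.com/go/pubsub client that this change vendors; this is roughly
// the pattern the promtail gcplog target follows. Project and subscription IDs
// are hypothetical.
package main

import (
	"context"
	"log"

	"cloud.google.com/go/pubsub"
)

func main() {
	ctx := context.Background()

	client, err := pubsub.NewClient(ctx, "my-project") // hypothetical project ID
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	sub := client.Subscription("my-subscription") // hypothetical subscription ID

	// Receive blocks and invokes the callback for each delivered message until
	// ctx is cancelled; Ack prevents redelivery after ack_deadline_seconds.
	err = sub.Receive(ctx, func(ctx context.Context, m *pubsub.Message) {
		log.Printf("received: %s", m.Data)
		m.Ack()
	})
	if err != nil {
		log.Fatal(err)
	}
}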
+func (*Subscription) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{16} +} + +func (x *Subscription) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Subscription) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *Subscription) GetPushConfig() *PushConfig { + if x != nil { + return x.PushConfig + } + return nil +} + +func (x *Subscription) GetAckDeadlineSeconds() int32 { + if x != nil { + return x.AckDeadlineSeconds + } + return 0 +} + +func (x *Subscription) GetRetainAckedMessages() bool { + if x != nil { + return x.RetainAckedMessages + } + return false +} + +func (x *Subscription) GetMessageRetentionDuration() *durationpb.Duration { + if x != nil { + return x.MessageRetentionDuration + } + return nil +} + +func (x *Subscription) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +func (x *Subscription) GetEnableMessageOrdering() bool { + if x != nil { + return x.EnableMessageOrdering + } + return false +} + +func (x *Subscription) GetExpirationPolicy() *ExpirationPolicy { + if x != nil { + return x.ExpirationPolicy + } + return nil +} + +func (x *Subscription) GetFilter() string { + if x != nil { + return x.Filter + } + return "" +} + +func (x *Subscription) GetDeadLetterPolicy() *DeadLetterPolicy { + if x != nil { + return x.DeadLetterPolicy + } + return nil +} + +func (x *Subscription) GetRetryPolicy() *RetryPolicy { + if x != nil { + return x.RetryPolicy + } + return nil +} + +func (x *Subscription) GetDetached() bool { + if x != nil { + return x.Detached + } + return false +} + +// A policy that specifies how Cloud Pub/Sub retries message delivery. +// +// Retry delay will be exponential based on provided minimum and maximum +// backoffs. https://en.wikipedia.org/wiki/Exponential_backoff. +// +// RetryPolicy will be triggered on NACKs or acknowledgement deadline exceeded +// events for a given message. +// +// Retry Policy is implemented on a best effort basis. At times, the delay +// between consecutive deliveries may not match the configuration. That is, +// delay can be more or less than configured backoff. +type RetryPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The minimum delay between consecutive deliveries of a given message. + // Value should be between 0 and 600 seconds. Defaults to 10 seconds. + MinimumBackoff *durationpb.Duration `protobuf:"bytes,1,opt,name=minimum_backoff,json=minimumBackoff,proto3" json:"minimum_backoff,omitempty"` + // The maximum delay between consecutive deliveries of a given message. + // Value should be between 0 and 600 seconds. Defaults to 600 seconds. 
+ MaximumBackoff *durationpb.Duration `protobuf:"bytes,2,opt,name=maximum_backoff,json=maximumBackoff,proto3" json:"maximum_backoff,omitempty"` +} + +func (x *RetryPolicy) Reset() { + *x = RetryPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RetryPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RetryPolicy) ProtoMessage() {} + +func (x *RetryPolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[17] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RetryPolicy.ProtoReflect.Descriptor instead. +func (*RetryPolicy) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{17} +} + +func (x *RetryPolicy) GetMinimumBackoff() *durationpb.Duration { + if x != nil { + return x.MinimumBackoff + } + return nil +} + +func (x *RetryPolicy) GetMaximumBackoff() *durationpb.Duration { + if x != nil { + return x.MaximumBackoff + } + return nil +} + +// Dead lettering is done on a best effort basis. The same message might be +// dead lettered multiple times. +// +// If validation on any of the fields fails at subscription creation/updation, +// the create/update subscription request will fail. +type DeadLetterPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the topic to which dead letter messages should be published. + // Format is `projects/{project}/topics/{topic}`.The Cloud Pub/Sub service + // account associated with the enclosing subscription's parent project (i.e., + // service-{project_number}@gcp-sa-pubsub.iam.gserviceaccount.com) must have + // permission to Publish() to this topic. + // + // The operation will fail if the topic does not exist. + // Users should ensure that there is a subscription attached to this topic + // since messages published to a topic with no subscriptions are lost. + DeadLetterTopic string `protobuf:"bytes,1,opt,name=dead_letter_topic,json=deadLetterTopic,proto3" json:"dead_letter_topic,omitempty"` + // The maximum number of delivery attempts for any message. The value must be + // between 5 and 100. + // + // The number of delivery attempts is defined as 1 + (the sum of number of + // NACKs and number of times the acknowledgement deadline has been exceeded + // for the message). + // + // A NACK is any call to ModifyAckDeadline with a 0 deadline. Note that + // client libraries may automatically extend ack_deadlines. + // + // This field will be honored on a best effort basis. + // + // If this parameter is 0, a default value of 5 is used. 
+ MaxDeliveryAttempts int32 `protobuf:"varint,2,opt,name=max_delivery_attempts,json=maxDeliveryAttempts,proto3" json:"max_delivery_attempts,omitempty"` +} + +func (x *DeadLetterPolicy) Reset() { + *x = DeadLetterPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeadLetterPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeadLetterPolicy) ProtoMessage() {} + +func (x *DeadLetterPolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[18] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeadLetterPolicy.ProtoReflect.Descriptor instead. +func (*DeadLetterPolicy) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{18} +} + +func (x *DeadLetterPolicy) GetDeadLetterTopic() string { + if x != nil { + return x.DeadLetterTopic + } + return "" +} + +func (x *DeadLetterPolicy) GetMaxDeliveryAttempts() int32 { + if x != nil { + return x.MaxDeliveryAttempts + } + return 0 +} + +// A policy that specifies the conditions for resource expiration (i.e., +// automatic resource deletion). +type ExpirationPolicy struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Specifies the "time-to-live" duration for an associated resource. The + // resource expires if it is not active for a period of `ttl`. The definition + // of "activity" depends on the type of the associated resource. The minimum + // and maximum allowed values for `ttl` depend on the type of the associated + // resource, as well. If `ttl` is not set, the associated resource never + // expires. + Ttl *durationpb.Duration `protobuf:"bytes,1,opt,name=ttl,proto3" json:"ttl,omitempty"` +} + +func (x *ExpirationPolicy) Reset() { + *x = ExpirationPolicy{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExpirationPolicy) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExpirationPolicy) ProtoMessage() {} + +func (x *ExpirationPolicy) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[19] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExpirationPolicy.ProtoReflect.Descriptor instead. +func (*ExpirationPolicy) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{19} +} + +func (x *ExpirationPolicy) GetTtl() *durationpb.Duration { + if x != nil { + return x.Ttl + } + return nil +} + +// Configuration for a push delivery endpoint. +type PushConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A URL locating the endpoint to which messages should be pushed. + // For example, a Webhook endpoint might use `https://example.com/push`. 
+ PushEndpoint string `protobuf:"bytes,1,opt,name=push_endpoint,json=pushEndpoint,proto3" json:"push_endpoint,omitempty"` + // Endpoint configuration attributes that can be used to control different + // aspects of the message delivery. + // + // The only currently supported attribute is `x-goog-version`, which you can + // use to change the format of the pushed message. This attribute + // indicates the version of the data expected by the endpoint. This + // controls the shape of the pushed message (i.e., its fields and metadata). + // + // If not present during the `CreateSubscription` call, it will default to + // the version of the Pub/Sub API used to make such call. If not present in a + // `ModifyPushConfig` call, its value will not be changed. `GetSubscription` + // calls will always return a valid version, even if the subscription was + // created without this attribute. + // + // The only supported values for the `x-goog-version` attribute are: + // + // * `v1beta1`: uses the push format defined in the v1beta1 Pub/Sub API. + // * `v1` or `v1beta2`: uses the push format defined in the v1 Pub/Sub API. + // + // For example: + //
+ //   attributes { "x-goog-version": "v1" }
+ Attributes map[string]string `protobuf:"bytes,2,rep,name=attributes,proto3" json:"attributes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // An authentication method used by push endpoints to verify the source of + // push requests. This can be used with push endpoints that are private by + // default to allow requests only from the Cloud Pub/Sub system, for example. + // This field is optional and should be set only by users interested in + // authenticated push. + // + // Types that are assignable to AuthenticationMethod: + // *PushConfig_OidcToken_ + AuthenticationMethod isPushConfig_AuthenticationMethod `protobuf_oneof:"authentication_method"` +} + +func (x *PushConfig) Reset() { + *x = PushConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PushConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushConfig) ProtoMessage() {} + +func (x *PushConfig) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[20] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PushConfig.ProtoReflect.Descriptor instead. +func (*PushConfig) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{20} +} + +func (x *PushConfig) GetPushEndpoint() string { + if x != nil { + return x.PushEndpoint + } + return "" +} + +func (x *PushConfig) GetAttributes() map[string]string { + if x != nil { + return x.Attributes + } + return nil +} + +func (m *PushConfig) GetAuthenticationMethod() isPushConfig_AuthenticationMethod { + if m != nil { + return m.AuthenticationMethod + } + return nil +} + +func (x *PushConfig) GetOidcToken() *PushConfig_OidcToken { + if x, ok := x.GetAuthenticationMethod().(*PushConfig_OidcToken_); ok { + return x.OidcToken + } + return nil +} + +type isPushConfig_AuthenticationMethod interface { + isPushConfig_AuthenticationMethod() +} + +type PushConfig_OidcToken_ struct { + // If specified, Pub/Sub will generate and attach an OIDC JWT token as an + // `Authorization` header in the HTTP request for every pushed message. + OidcToken *PushConfig_OidcToken `protobuf:"bytes,3,opt,name=oidc_token,json=oidcToken,proto3,oneof"` +} + +func (*PushConfig_OidcToken_) isPushConfig_AuthenticationMethod() {} + +// A message and its corresponding acknowledgment ID. +type ReceivedMessage struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // This ID can be used to acknowledge the received message. + AckId string `protobuf:"bytes,1,opt,name=ack_id,json=ackId,proto3" json:"ack_id,omitempty"` + // The message. + Message *PubsubMessage `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // The approximate number of times that Cloud Pub/Sub has attempted to deliver + // the associated message to a subscriber. + // + // More precisely, this is 1 + (number of NACKs) + + // (number of ack_deadline exceeds) for this message. + // + // A NACK is any call to ModifyAckDeadline with a 0 deadline. An ack_deadline + // exceeds event is whenever a message is not acknowledged within + // ack_deadline. 
Note that ack_deadline is initially + // Subscription.ackDeadlineSeconds, but may get extended automatically by + // the client library. + // + // Upon the first delivery of a given message, `delivery_attempt` will have a + // value of 1. The value is calculated at best effort and is approximate. + // + // If a DeadLetterPolicy is not set on the subscription, this will be 0. + DeliveryAttempt int32 `protobuf:"varint,3,opt,name=delivery_attempt,json=deliveryAttempt,proto3" json:"delivery_attempt,omitempty"` +} + +func (x *ReceivedMessage) Reset() { + *x = ReceivedMessage{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ReceivedMessage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ReceivedMessage) ProtoMessage() {} + +func (x *ReceivedMessage) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[21] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ReceivedMessage.ProtoReflect.Descriptor instead. +func (*ReceivedMessage) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{21} +} + +func (x *ReceivedMessage) GetAckId() string { + if x != nil { + return x.AckId + } + return "" +} + +func (x *ReceivedMessage) GetMessage() *PubsubMessage { + if x != nil { + return x.Message + } + return nil +} + +func (x *ReceivedMessage) GetDeliveryAttempt() int32 { + if x != nil { + return x.DeliveryAttempt + } + return 0 +} + +// Request for the GetSubscription method. +type GetSubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the subscription to get. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` +} + +func (x *GetSubscriptionRequest) Reset() { + *x = GetSubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSubscriptionRequest) ProtoMessage() {} + +func (x *GetSubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[22] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSubscriptionRequest.ProtoReflect.Descriptor instead. +func (*GetSubscriptionRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{22} +} + +func (x *GetSubscriptionRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +// Request for the UpdateSubscription method. +type UpdateSubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The updated subscription object. 
+ Subscription *Subscription `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` + // Required. Indicates which fields in the provided subscription to update. + // Must be specified and non-empty. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateSubscriptionRequest) Reset() { + *x = UpdateSubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateSubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSubscriptionRequest) ProtoMessage() {} + +func (x *UpdateSubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[23] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSubscriptionRequest.ProtoReflect.Descriptor instead. +func (*UpdateSubscriptionRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{23} +} + +func (x *UpdateSubscriptionRequest) GetSubscription() *Subscription { + if x != nil { + return x.Subscription + } + return nil +} + +func (x *UpdateSubscriptionRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// Request for the `ListSubscriptions` method. +type ListSubscriptionsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the project in which to list subscriptions. + // Format is `projects/{project-id}`. + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` + // Maximum number of subscriptions to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // The value returned by the last `ListSubscriptionsResponse`; indicates that + // this is a continuation of a prior `ListSubscriptions` call, and that the + // system should return the next page of data. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListSubscriptionsRequest) Reset() { + *x = ListSubscriptionsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListSubscriptionsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSubscriptionsRequest) ProtoMessage() {} + +func (x *ListSubscriptionsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[24] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSubscriptionsRequest.ProtoReflect.Descriptor instead. 
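// A minimal sketch (not part of the generated file) of an UpdateSubscriptionRequest:
// the FieldMask names the fields to change, and fields of the embedded
// Subscription that are not listed in the mask are left untouched. The
// subscription name is hypothetical.
package main

import (
	"context"
	"log"

	pubsub "cloud.google.com/go/pubsub/apiv1"
	pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
	"google.golang.org/protobuf/types/known/fieldmaskpb"
)

func main() {
	ctx := context.Background()

	c, err := pubsub.NewSubscriberClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// Only ack_deadline_seconds is updated because it is the only path in the mask.
	_, err = c.UpdateSubscription(ctx, &pubsubpb.UpdateSubscriptionRequest{
		Subscription: &pubsubpb.Subscription{
			Name:               "projects/my-project/subscriptions/my-sub", // hypothetical
			AckDeadlineSeconds: 60,
		},
		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"ack_deadline_seconds"}},
	})
	if err != nil {
		log.Fatal(err)
	}
}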
+func (*ListSubscriptionsRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{24} +} + +func (x *ListSubscriptionsRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *ListSubscriptionsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListSubscriptionsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// Response for the `ListSubscriptions` method. +type ListSubscriptionsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The subscriptions that match the request. + Subscriptions []*Subscription `protobuf:"bytes,1,rep,name=subscriptions,proto3" json:"subscriptions,omitempty"` + // If not empty, indicates that there may be more subscriptions that match + // the request; this value should be passed in a new + // `ListSubscriptionsRequest` to get more subscriptions. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListSubscriptionsResponse) Reset() { + *x = ListSubscriptionsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListSubscriptionsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSubscriptionsResponse) ProtoMessage() {} + +func (x *ListSubscriptionsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[25] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSubscriptionsResponse.ProtoReflect.Descriptor instead. +func (*ListSubscriptionsResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{25} +} + +func (x *ListSubscriptionsResponse) GetSubscriptions() []*Subscription { + if x != nil { + return x.Subscriptions + } + return nil +} + +func (x *ListSubscriptionsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request for the DeleteSubscription method. +type DeleteSubscriptionRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The subscription to delete. + // Format is `projects/{project}/subscriptions/{sub}`. 
+ Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` +} + +func (x *DeleteSubscriptionRequest) Reset() { + *x = DeleteSubscriptionRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteSubscriptionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteSubscriptionRequest) ProtoMessage() {} + +func (x *DeleteSubscriptionRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[26] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteSubscriptionRequest.ProtoReflect.Descriptor instead. +func (*DeleteSubscriptionRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{26} +} + +func (x *DeleteSubscriptionRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +// Request for the ModifyPushConfig method. +type ModifyPushConfigRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the subscription. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` + // Required. The push configuration for future deliveries. + // + // An empty `pushConfig` indicates that the Pub/Sub system should + // stop pushing messages from the given subscription and allow + // messages to be pulled and acknowledged - effectively pausing + // the subscription if `Pull` or `StreamingPull` is not called. + PushConfig *PushConfig `protobuf:"bytes,2,opt,name=push_config,json=pushConfig,proto3" json:"push_config,omitempty"` +} + +func (x *ModifyPushConfigRequest) Reset() { + *x = ModifyPushConfigRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ModifyPushConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ModifyPushConfigRequest) ProtoMessage() {} + +func (x *ModifyPushConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[27] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ModifyPushConfigRequest.ProtoReflect.Descriptor instead. +func (*ModifyPushConfigRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{27} +} + +func (x *ModifyPushConfigRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +func (x *ModifyPushConfigRequest) GetPushConfig() *PushConfig { + if x != nil { + return x.PushConfig + } + return nil +} + +// Request for the `Pull` method. +type PullRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The subscription from which messages should be pulled. 
+ // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` + // Optional. If this field set to true, the system will respond immediately + // even if it there are no messages available to return in the `Pull` + // response. Otherwise, the system may wait (for a bounded amount of time) + // until at least one message is available, rather than returning no messages. + // Warning: setting this field to `true` is discouraged because it adversely + // impacts the performance of `Pull` operations. We recommend that users do + // not set this field. + // + // Deprecated: Do not use. + ReturnImmediately bool `protobuf:"varint,2,opt,name=return_immediately,json=returnImmediately,proto3" json:"return_immediately,omitempty"` + // Required. The maximum number of messages to return for this request. Must + // be a positive integer. The Pub/Sub system may return fewer than the number + // specified. + MaxMessages int32 `protobuf:"varint,3,opt,name=max_messages,json=maxMessages,proto3" json:"max_messages,omitempty"` +} + +func (x *PullRequest) Reset() { + *x = PullRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PullRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PullRequest) ProtoMessage() {} + +func (x *PullRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[28] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PullRequest.ProtoReflect.Descriptor instead. +func (*PullRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{28} +} + +func (x *PullRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +// Deprecated: Do not use. +func (x *PullRequest) GetReturnImmediately() bool { + if x != nil { + return x.ReturnImmediately + } + return false +} + +func (x *PullRequest) GetMaxMessages() int32 { + if x != nil { + return x.MaxMessages + } + return 0 +} + +// Response for the `Pull` method. +type PullResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Received Pub/Sub messages. The list will be empty if there are no more + // messages available in the backlog. For JSON, the response can be entirely + // empty. The Pub/Sub system may return fewer than the `maxMessages` requested + // even if there are more messages available in the backlog. 
+ ReceivedMessages []*ReceivedMessage `protobuf:"bytes,1,rep,name=received_messages,json=receivedMessages,proto3" json:"received_messages,omitempty"` +} + +func (x *PullResponse) Reset() { + *x = PullResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PullResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PullResponse) ProtoMessage() {} + +func (x *PullResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[29] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PullResponse.ProtoReflect.Descriptor instead. +func (*PullResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{29} +} + +func (x *PullResponse) GetReceivedMessages() []*ReceivedMessage { + if x != nil { + return x.ReceivedMessages + } + return nil +} + +// Request for the ModifyAckDeadline method. +type ModifyAckDeadlineRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the subscription. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` + // Required. List of acknowledgment IDs. + AckIds []string `protobuf:"bytes,4,rep,name=ack_ids,json=ackIds,proto3" json:"ack_ids,omitempty"` + // Required. The new ack deadline with respect to the time this request was + // sent to the Pub/Sub system. For example, if the value is 10, the new ack + // deadline will expire 10 seconds after the `ModifyAckDeadline` call was + // made. Specifying zero might immediately make the message available for + // delivery to another subscriber client. This typically results in an + // increase in the rate of message redeliveries (that is, duplicates). + // The minimum deadline you can specify is 0 seconds. + // The maximum deadline you can specify is 600 seconds (10 minutes). + AckDeadlineSeconds int32 `protobuf:"varint,3,opt,name=ack_deadline_seconds,json=ackDeadlineSeconds,proto3" json:"ack_deadline_seconds,omitempty"` +} + +func (x *ModifyAckDeadlineRequest) Reset() { + *x = ModifyAckDeadlineRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ModifyAckDeadlineRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ModifyAckDeadlineRequest) ProtoMessage() {} + +func (x *ModifyAckDeadlineRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[30] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ModifyAckDeadlineRequest.ProtoReflect.Descriptor instead. 
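// A minimal sketch (not part of the generated file) of a unary pull: the
// PullRequest/PullResponse messages above carry the received messages and their
// ack IDs, which are then passed back in an AcknowledgeRequest (defined just
// below). The subscription name is hypothetical.
package main

import (
	"context"
	"log"

	pubsub "cloud.google.com/go/pubsub/apiv1"
	pubsubpb "google.golang.org/genproto/googleapis/pubsub/v1"
)

func main() {
	ctx := context.Background()

	c, err := pubsub.NewSubscriberClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	const sub = "projects/my-project/subscriptions/my-sub" // hypothetical resource name

	resp, err := c.Pull(ctx, &pubsubpb.PullRequest{
		Subscription: sub,
		MaxMessages:  10, // the server may return fewer, per the comment above
	})
	if err != nil {
		log.Fatal(err)
	}

	ackIDs := make([]string, 0, len(resp.ReceivedMessages))
	for _, rm := range resp.ReceivedMessages {
		log.Printf("message %s: %s", rm.Message.MessageId, rm.Message.Data)
		ackIDs = append(ackIDs, rm.AckId)
	}
	if len(ackIDs) > 0 {
		if err := c.Acknowledge(ctx, &pubsubpb.AcknowledgeRequest{
			Subscription: sub,
			AckIds:       ackIDs,
		}); err != nil {
			log.Fatal(err)
		}
	}
}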
+func (*ModifyAckDeadlineRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{30} +} + +func (x *ModifyAckDeadlineRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +func (x *ModifyAckDeadlineRequest) GetAckIds() []string { + if x != nil { + return x.AckIds + } + return nil +} + +func (x *ModifyAckDeadlineRequest) GetAckDeadlineSeconds() int32 { + if x != nil { + return x.AckDeadlineSeconds + } + return 0 +} + +// Request for the Acknowledge method. +type AcknowledgeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The subscription whose message is being acknowledged. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` + // Required. The acknowledgment ID for the messages being acknowledged that + // was returned by the Pub/Sub system in the `Pull` response. Must not be + // empty. + AckIds []string `protobuf:"bytes,2,rep,name=ack_ids,json=ackIds,proto3" json:"ack_ids,omitempty"` +} + +func (x *AcknowledgeRequest) Reset() { + *x = AcknowledgeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AcknowledgeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AcknowledgeRequest) ProtoMessage() {} + +func (x *AcknowledgeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[31] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AcknowledgeRequest.ProtoReflect.Descriptor instead. +func (*AcknowledgeRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{31} +} + +func (x *AcknowledgeRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +func (x *AcknowledgeRequest) GetAckIds() []string { + if x != nil { + return x.AckIds + } + return nil +} + +// Request for the `StreamingPull` streaming RPC method. This request is used to +// establish the initial stream as well as to stream acknowledgements and ack +// deadline modifications from the client to the server. +type StreamingPullRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The subscription for which to initialize the new stream. This + // must be provided in the first request on the stream, and must not be set in + // subsequent requests from client to server. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` + // List of acknowledgement IDs for acknowledging previously received messages + // (received on this stream or a different stream). If an ack ID has expired, + // the corresponding message may be redelivered later. Acknowledging a message + // more than once will not result in an error. If the acknowledgement ID is + // malformed, the stream will be aborted with status `INVALID_ARGUMENT`. 
+ AckIds []string `protobuf:"bytes,2,rep,name=ack_ids,json=ackIds,proto3" json:"ack_ids,omitempty"` + // The list of new ack deadlines for the IDs listed in + // `modify_deadline_ack_ids`. The size of this list must be the same as the + // size of `modify_deadline_ack_ids`. If it differs the stream will be aborted + // with `INVALID_ARGUMENT`. Each element in this list is applied to the + // element in the same position in `modify_deadline_ack_ids`. The new ack + // deadline is with respect to the time this request was sent to the Pub/Sub + // system. Must be >= 0. For example, if the value is 10, the new ack deadline + // will expire 10 seconds after this request is received. If the value is 0, + // the message is immediately made available for another streaming or + // non-streaming pull request. If the value is < 0 (an error), the stream will + // be aborted with status `INVALID_ARGUMENT`. + ModifyDeadlineSeconds []int32 `protobuf:"varint,3,rep,packed,name=modify_deadline_seconds,json=modifyDeadlineSeconds,proto3" json:"modify_deadline_seconds,omitempty"` + // List of acknowledgement IDs whose deadline will be modified based on the + // corresponding element in `modify_deadline_seconds`. This field can be used + // to indicate that more time is needed to process a message by the + // subscriber, or to make the message available for redelivery if the + // processing was interrupted. + ModifyDeadlineAckIds []string `protobuf:"bytes,4,rep,name=modify_deadline_ack_ids,json=modifyDeadlineAckIds,proto3" json:"modify_deadline_ack_ids,omitempty"` + // Required. The ack deadline to use for the stream. This must be provided in + // the first request on the stream, but it can also be updated on subsequent + // requests from client to server. The minimum deadline you can specify is 10 + // seconds. The maximum deadline you can specify is 600 seconds (10 minutes). + StreamAckDeadlineSeconds int32 `protobuf:"varint,5,opt,name=stream_ack_deadline_seconds,json=streamAckDeadlineSeconds,proto3" json:"stream_ack_deadline_seconds,omitempty"` + // A unique identifier that is used to distinguish client instances from each + // other. Only needs to be provided on the initial request. When a stream + // disconnects and reconnects for the same stream, the client_id should be set + // to the same value so that state associated with the old stream can be + // transferred to the new stream. The same client_id should not be used for + // different client instances. + ClientId string `protobuf:"bytes,6,opt,name=client_id,json=clientId,proto3" json:"client_id,omitempty"` + // Flow control settings for the maximum number of outstanding messages. When + // there are `max_outstanding_messages` or more currently sent to the + // streaming pull client that have not yet been acked or nacked, the server + // stops sending more messages. The sending of messages resumes once the + // number of outstanding messages is less than this value. If the value is + // <= 0, there is no limit to the number of outstanding messages. This + // property can only be set on the initial StreamingPullRequest. If it is set + // on a subsequent request, the stream will be aborted with status + // `INVALID_ARGUMENT`. + MaxOutstandingMessages int64 `protobuf:"varint,7,opt,name=max_outstanding_messages,json=maxOutstandingMessages,proto3" json:"max_outstanding_messages,omitempty"` + // Flow control settings for the maximum number of outstanding bytes. 
When + // there are `max_outstanding_bytes` or more worth of messages currently sent + // to the streaming pull client that have not yet been acked or nacked, the + // server will stop sending more messages. The sending of messages resumes + // once the number of outstanding bytes is less than this value. If the value + // is <= 0, there is no limit to the number of outstanding bytes. This + // property can only be set on the initial StreamingPullRequest. If it is set + // on a subsequent request, the stream will be aborted with status + // `INVALID_ARGUMENT`. + MaxOutstandingBytes int64 `protobuf:"varint,8,opt,name=max_outstanding_bytes,json=maxOutstandingBytes,proto3" json:"max_outstanding_bytes,omitempty"` +} + +func (x *StreamingPullRequest) Reset() { + *x = StreamingPullRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamingPullRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamingPullRequest) ProtoMessage() {} + +func (x *StreamingPullRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[32] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamingPullRequest.ProtoReflect.Descriptor instead. +func (*StreamingPullRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{32} +} + +func (x *StreamingPullRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +func (x *StreamingPullRequest) GetAckIds() []string { + if x != nil { + return x.AckIds + } + return nil +} + +func (x *StreamingPullRequest) GetModifyDeadlineSeconds() []int32 { + if x != nil { + return x.ModifyDeadlineSeconds + } + return nil +} + +func (x *StreamingPullRequest) GetModifyDeadlineAckIds() []string { + if x != nil { + return x.ModifyDeadlineAckIds + } + return nil +} + +func (x *StreamingPullRequest) GetStreamAckDeadlineSeconds() int32 { + if x != nil { + return x.StreamAckDeadlineSeconds + } + return 0 +} + +func (x *StreamingPullRequest) GetClientId() string { + if x != nil { + return x.ClientId + } + return "" +} + +func (x *StreamingPullRequest) GetMaxOutstandingMessages() int64 { + if x != nil { + return x.MaxOutstandingMessages + } + return 0 +} + +func (x *StreamingPullRequest) GetMaxOutstandingBytes() int64 { + if x != nil { + return x.MaxOutstandingBytes + } + return 0 +} + +// Response for the `StreamingPull` method. This response is used to stream +// messages from the server to the client. +type StreamingPullResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Received Pub/Sub messages. This will not be empty. 
+ ReceivedMessages []*ReceivedMessage `protobuf:"bytes,1,rep,name=received_messages,json=receivedMessages,proto3" json:"received_messages,omitempty"` +} + +func (x *StreamingPullResponse) Reset() { + *x = StreamingPullResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StreamingPullResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StreamingPullResponse) ProtoMessage() {} + +func (x *StreamingPullResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[33] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StreamingPullResponse.ProtoReflect.Descriptor instead. +func (*StreamingPullResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{33} +} + +func (x *StreamingPullResponse) GetReceivedMessages() []*ReceivedMessage { + if x != nil { + return x.ReceivedMessages + } + return nil +} + +// Request for the `CreateSnapshot` method. +type CreateSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. User-provided name for this snapshot. If the name is not provided + // in the request, the server will assign a random name for this snapshot on + // the same project as the subscription. Note that for REST API requests, you + // must specify a name. See the resource + // name rules. Format is `projects/{project}/snapshots/{snap}`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The subscription whose backlog the snapshot retains. + // Specifically, the created snapshot is guaranteed to retain: + // (a) The existing backlog on the subscription. More precisely, this is + // defined as the messages in the subscription's backlog that are + // unacknowledged upon the successful completion of the + // `CreateSnapshot` request; as well as: + // (b) Any messages published to the subscription's topic following the + // successful completion of the CreateSnapshot request. + // Format is `projects/{project}/subscriptions/{sub}`. + Subscription string `protobuf:"bytes,2,opt,name=subscription,proto3" json:"subscription,omitempty"` + // See Creating and + // managing labels. 
+ Labels map[string]string `protobuf:"bytes,3,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *CreateSnapshotRequest) Reset() { + *x = CreateSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSnapshotRequest) ProtoMessage() {} + +func (x *CreateSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[34] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSnapshotRequest.ProtoReflect.Descriptor instead. +func (*CreateSnapshotRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{34} +} + +func (x *CreateSnapshotRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateSnapshotRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +func (x *CreateSnapshotRequest) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +// Request for the UpdateSnapshot method. +type UpdateSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The updated snapshot object. + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + // Required. Indicates which fields in the provided snapshot to update. + // Must be specified and non-empty. + UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` +} + +func (x *UpdateSnapshotRequest) Reset() { + *x = UpdateSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *UpdateSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSnapshotRequest) ProtoMessage() {} + +func (x *UpdateSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[35] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSnapshotRequest.ProtoReflect.Descriptor instead. +func (*UpdateSnapshotRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{35} +} + +func (x *UpdateSnapshotRequest) GetSnapshot() *Snapshot { + if x != nil { + return x.Snapshot + } + return nil +} + +func (x *UpdateSnapshotRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// A snapshot resource. Snapshots are used in +// [Seek](https://cloud.google.com/pubsub/docs/replay-overview) +// operations, which allow you to manage message acknowledgments in bulk. 
That +// is, you can set the acknowledgment state of messages in an existing +// subscription to the state captured by a snapshot. +type Snapshot struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The name of the snapshot. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The name of the topic from which this snapshot is retaining messages. + Topic string `protobuf:"bytes,2,opt,name=topic,proto3" json:"topic,omitempty"` + // The snapshot is guaranteed to exist up until this time. + // A newly-created snapshot expires no later than 7 days from the time of its + // creation. Its exact lifetime is determined at creation by the existing + // backlog in the source subscription. Specifically, the lifetime of the + // snapshot is `7 days - (age of oldest unacked message in the subscription)`. + // For example, consider a subscription whose oldest unacked message is 3 days + // old. If a snapshot is created from this subscription, the snapshot -- which + // will always capture this 3-day-old backlog as long as the snapshot + // exists -- will expire in 4 days. The service will refuse to create a + // snapshot that would expire in less than 1 hour after creation. + ExpireTime *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=expire_time,json=expireTime,proto3" json:"expire_time,omitempty"` + // See [Creating and managing labels] + // (https://cloud.google.com/pubsub/docs/labels). + Labels map[string]string `protobuf:"bytes,4,rep,name=labels,proto3" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (x *Snapshot) Reset() { + *x = Snapshot{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Snapshot) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Snapshot) ProtoMessage() {} + +func (x *Snapshot) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[36] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Snapshot.ProtoReflect.Descriptor instead. +func (*Snapshot) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{36} +} + +func (x *Snapshot) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Snapshot) GetTopic() string { + if x != nil { + return x.Topic + } + return "" +} + +func (x *Snapshot) GetExpireTime() *timestamppb.Timestamp { + if x != nil { + return x.ExpireTime + } + return nil +} + +func (x *Snapshot) GetLabels() map[string]string { + if x != nil { + return x.Labels + } + return nil +} + +// Request for the GetSnapshot method. +type GetSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the snapshot to get. + // Format is `projects/{project}/snapshots/{snap}`. 
+ Snapshot string `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` +} + +func (x *GetSnapshotRequest) Reset() { + *x = GetSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *GetSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetSnapshotRequest) ProtoMessage() {} + +func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[37] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetSnapshotRequest.ProtoReflect.Descriptor instead. +func (*GetSnapshotRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{37} +} + +func (x *GetSnapshotRequest) GetSnapshot() string { + if x != nil { + return x.Snapshot + } + return "" +} + +// Request for the `ListSnapshots` method. +type ListSnapshotsRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the project in which to list snapshots. + // Format is `projects/{project-id}`. + Project string `protobuf:"bytes,1,opt,name=project,proto3" json:"project,omitempty"` + // Maximum number of snapshots to return. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // The value returned by the last `ListSnapshotsResponse`; indicates that this + // is a continuation of a prior `ListSnapshots` call, and that the system + // should return the next page of data. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` +} + +func (x *ListSnapshotsRequest) Reset() { + *x = ListSnapshotsRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListSnapshotsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSnapshotsRequest) ProtoMessage() {} + +func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[38] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSnapshotsRequest.ProtoReflect.Descriptor instead. +func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{38} +} + +func (x *ListSnapshotsRequest) GetProject() string { + if x != nil { + return x.Project + } + return "" +} + +func (x *ListSnapshotsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListSnapshotsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +// Response for the `ListSnapshots` method. +type ListSnapshotsResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resulting snapshots. 
+ Snapshots []*Snapshot `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"` + // If not empty, indicates that there may be more snapshot that match the + // request; this value should be passed in a new `ListSnapshotsRequest`. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` +} + +func (x *ListSnapshotsResponse) Reset() { + *x = ListSnapshotsResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ListSnapshotsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSnapshotsResponse) ProtoMessage() {} + +func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[39] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSnapshotsResponse.ProtoReflect.Descriptor instead. +func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{39} +} + +func (x *ListSnapshotsResponse) GetSnapshots() []*Snapshot { + if x != nil { + return x.Snapshots + } + return nil +} + +func (x *ListSnapshotsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +// Request for the `DeleteSnapshot` method. +type DeleteSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The name of the snapshot to delete. + // Format is `projects/{project}/snapshots/{snap}`. + Snapshot string `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` +} + +func (x *DeleteSnapshotRequest) Reset() { + *x = DeleteSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DeleteSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteSnapshotRequest) ProtoMessage() {} + +func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[40] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteSnapshotRequest.ProtoReflect.Descriptor instead. +func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{40} +} + +func (x *DeleteSnapshotRequest) GetSnapshot() string { + if x != nil { + return x.Snapshot + } + return "" +} + +// Request for the `Seek` method. +type SeekRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The subscription to affect. 
+ Subscription string `protobuf:"bytes,1,opt,name=subscription,proto3" json:"subscription,omitempty"` + // Types that are assignable to Target: + // *SeekRequest_Time + // *SeekRequest_Snapshot + Target isSeekRequest_Target `protobuf_oneof:"target"` +} + +func (x *SeekRequest) Reset() { + *x = SeekRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SeekRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SeekRequest) ProtoMessage() {} + +func (x *SeekRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[41] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SeekRequest.ProtoReflect.Descriptor instead. +func (*SeekRequest) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{41} +} + +func (x *SeekRequest) GetSubscription() string { + if x != nil { + return x.Subscription + } + return "" +} + +func (m *SeekRequest) GetTarget() isSeekRequest_Target { + if m != nil { + return m.Target + } + return nil +} + +func (x *SeekRequest) GetTime() *timestamppb.Timestamp { + if x, ok := x.GetTarget().(*SeekRequest_Time); ok { + return x.Time + } + return nil +} + +func (x *SeekRequest) GetSnapshot() string { + if x, ok := x.GetTarget().(*SeekRequest_Snapshot); ok { + return x.Snapshot + } + return "" +} + +type isSeekRequest_Target interface { + isSeekRequest_Target() +} + +type SeekRequest_Time struct { + // The time to seek to. + // Messages retained in the subscription that were published before this + // time are marked as acknowledged, and messages retained in the + // subscription that were published after this time are marked as + // unacknowledged. Note that this operation affects only those messages + // retained in the subscription (configured by the combination of + // `message_retention_duration` and `retain_acked_messages`). For example, + // if `time` corresponds to a point before the message retention + // window (or to a point before the system's notion of the subscription + // creation time), only retained messages will be marked as unacknowledged, + // and already-expunged messages will not be restored. + Time *timestamppb.Timestamp `protobuf:"bytes,2,opt,name=time,proto3,oneof"` +} + +type SeekRequest_Snapshot struct { + // The snapshot to seek to. The snapshot's topic must be the same as that of + // the provided subscription. + // Format is `projects/{project}/snapshots/{snap}`. + Snapshot string `protobuf:"bytes,3,opt,name=snapshot,proto3,oneof"` +} + +func (*SeekRequest_Time) isSeekRequest_Target() {} + +func (*SeekRequest_Snapshot) isSeekRequest_Target() {} + +// Response for the `Seek` method (this response is empty). 
+type SeekResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *SeekResponse) Reset() { + *x = SeekResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SeekResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SeekResponse) ProtoMessage() {} + +func (x *SeekResponse) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[42] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SeekResponse.ProtoReflect.Descriptor instead. +func (*SeekResponse) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{42} +} + +// Contains information needed for generating an +// [OpenID Connect +// token](https://developers.google.com/identity/protocols/OpenIDConnect). +type PushConfig_OidcToken struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // [Service account + // email](https://cloud.google.com/iam/docs/service-accounts) + // to be used for generating the OIDC token. The caller (for + // CreateSubscription, UpdateSubscription, and ModifyPushConfig RPCs) must + // have the iam.serviceAccounts.actAs permission for the service account. + ServiceAccountEmail string `protobuf:"bytes,1,opt,name=service_account_email,json=serviceAccountEmail,proto3" json:"service_account_email,omitempty"` + // Audience to be used when generating OIDC token. The audience claim + // identifies the recipients that the JWT is intended for. The audience + // value is a single case-sensitive string. Having multiple values (array) + // for the audience field is not supported. More info about the OIDC JWT + // token audience here: https://tools.ietf.org/html/rfc7519#section-4.1.3 + // Note: if not specified, the Push endpoint URL will be used. + Audience string `protobuf:"bytes,2,opt,name=audience,proto3" json:"audience,omitempty"` +} + +func (x *PushConfig_OidcToken) Reset() { + *x = PushConfig_OidcToken{} + if protoimpl.UnsafeEnabled { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[46] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *PushConfig_OidcToken) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PushConfig_OidcToken) ProtoMessage() {} + +func (x *PushConfig_OidcToken) ProtoReflect() protoreflect.Message { + mi := &file_google_pubsub_v1_pubsub_proto_msgTypes[46] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PushConfig_OidcToken.ProtoReflect.Descriptor instead. 
+func (*PushConfig_OidcToken) Descriptor() ([]byte, []int) { + return file_google_pubsub_v1_pubsub_proto_rawDescGZIP(), []int{20, 0} +} + +func (x *PushConfig_OidcToken) GetServiceAccountEmail() string { + if x != nil { + return x.ServiceAccountEmail + } + return "" +} + +func (x *PushConfig_OidcToken) GetAudience() string { + if x != nil { + return x.Audience + } + return "" +} + +var File_google_pubsub_v1_pubsub_proto protoreflect.FileDescriptor + +var file_google_pubsub_v1_pubsub_proto_rawDesc = []byte{ + 0x0a, 0x1d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2f, + 0x76, 0x31, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, + 0x10, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x61, 0x6e, + 0x6e, 0x6f, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, + 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, + 0x69, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, + 0x75, 0x66, 0x2f, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x56, 0x0a, 0x14, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, + 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x3e, 0x0a, 0x1b, + 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x73, 0x69, 0x73, 0x74, 0x65, + 0x6e, 0x63, 0x65, 0x5f, 0x72, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x09, 0x52, 0x19, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x50, 0x65, 0x72, 0x73, 0x69, 0x73, + 0x74, 0x65, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x67, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0xee, 0x02, 0x0a, + 0x05, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x17, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, + 0x3b, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x12, 0x5c, 0x0a, 0x16, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x5f, + 
0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, 0x72, 0x61, 0x67, 0x65, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x52, 0x14, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x53, 0x74, 0x6f, + 0x72, 0x61, 0x67, 0x65, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x20, 0x0a, 0x0c, 0x6b, 0x6d, + 0x73, 0x5f, 0x6b, 0x65, 0x79, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0a, 0x6b, 0x6d, 0x73, 0x4b, 0x65, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x39, 0x0a, 0x0b, + 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, + 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, + 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x54, 0xea, 0x41, 0x51, 0x0a, 0x1b, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x21, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x73, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x7d, 0x12, 0x0f, 0x5f, 0x64, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x2d, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x5f, 0x22, 0xb4, 0x02, + 0x0a, 0x0d, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x4f, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x2e, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, + 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x0c, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0b, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x6b, + 0x65, 0x79, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, + 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x1a, 0x3d, 0x0a, 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, + 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x3a, 0x02, 0x38, 0x01, 0x22, 0x4c, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, + 0x69, 0x63, 0x22, 0x8a, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x32, 0x0a, 0x05, 0x74, 0x6f, 0x70, + 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, + 0x63, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x40, 0x0a, + 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, + 0x8d, 0x01, 0x0a, 0x0e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x40, 0x0a, + 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x08, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, + 0x32, 0x0a, 0x0f, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x5f, 0x69, 0x64, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x49, 0x64, 0x73, 0x22, 0x9e, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, + 0x63, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, + 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, + 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, + 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, + 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, + 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x6d, 0x0a, 0x12, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, + 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 
0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, + 0x70, 0x69, 0x63, 0x52, 0x06, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, + 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x22, 0x96, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, + 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x97, 0x01, 0x0a, + 0x1e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x4d, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x42, 0x27, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, + 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x92, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x54, + 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, + 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x62, 0x0a, 0x1a, 0x4c, + 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1c, 0x0a, 
0x09, 0x73, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, + 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, + 0x4f, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x22, 0x6b, 0x0a, 0x19, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, + 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x1c, 0x0a, + 0x1a, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8a, 0x07, 0x0a, 0x0c, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x12, 0x3d, 0x0a, 0x0b, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, + 0x66, 0x69, 0x67, 0x52, 0x0a, 0x70, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x30, 0x0a, 0x14, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, + 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x05, 0x52, 0x12, 0x61, + 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, + 0x73, 0x12, 0x32, 0x0a, 0x15, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x5f, 0x61, 0x63, 0x6b, 0x65, + 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x08, + 0x52, 0x13, 0x72, 0x65, 0x74, 0x61, 0x69, 0x6e, 0x41, 0x63, 0x6b, 0x65, 0x64, 0x4d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x57, 0x0a, 0x1a, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x5f, 0x72, 0x65, 0x74, 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x64, 0x75, 0x72, 0x61, 0x74, + 
0x69, 0x6f, 0x6e, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x18, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x65, 0x74, + 0x65, 0x6e, 0x74, 0x69, 0x6f, 0x6e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x42, + 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, + 0x6c, 0x73, 0x12, 0x36, 0x0a, 0x17, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x5f, 0x6f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x15, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x4f, 0x72, 0x64, 0x65, 0x72, 0x69, 0x6e, 0x67, 0x12, 0x4f, 0x0a, 0x11, 0x65, 0x78, + 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, + 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x10, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x66, + 0x69, 0x6c, 0x74, 0x65, 0x72, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x66, 0x69, 0x6c, + 0x74, 0x65, 0x72, 0x12, 0x50, 0x0a, 0x12, 0x64, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x74, 0x74, + 0x65, 0x72, 0x5f, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x52, 0x10, 0x64, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x40, 0x0a, 0x0c, 0x72, 0x65, 0x74, 0x72, 0x79, 0x5f, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x65, 0x74, 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x0b, 0x72, 0x65, 0x74, 0x72, + 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x1a, 0x0a, 0x08, 0x64, 0x65, 0x74, 0x61, 0x63, + 0x68, 0x65, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x64, 0x65, 0x74, 0x61, 0x63, + 0x68, 0x65, 0x64, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, + 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x3a, 0x58, + 0xea, 0x41, 0x55, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2f, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x7b, 0x70, 
0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x7d, 0x22, 0x95, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x74, + 0x72, 0x79, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x42, 0x0a, 0x0f, 0x6d, 0x69, 0x6e, 0x69, + 0x6d, 0x75, 0x6d, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x6d, 0x69, + 0x6e, 0x69, 0x6d, 0x75, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x12, 0x42, 0x0a, 0x0f, + 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x5f, 0x62, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0e, 0x6d, 0x61, 0x78, 0x69, 0x6d, 0x75, 0x6d, 0x42, 0x61, 0x63, 0x6b, 0x6f, 0x66, 0x66, + 0x22, 0x72, 0x0a, 0x10, 0x44, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2a, 0x0a, 0x11, 0x64, 0x65, 0x61, 0x64, 0x5f, 0x6c, 0x65, 0x74, + 0x74, 0x65, 0x72, 0x5f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x0f, 0x64, 0x65, 0x61, 0x64, 0x4c, 0x65, 0x74, 0x74, 0x65, 0x72, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x12, 0x32, 0x0a, 0x15, 0x6d, 0x61, 0x78, 0x5f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, + 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, + 0x13, 0x6d, 0x61, 0x78, 0x44, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x41, 0x74, 0x74, 0x65, + 0x6d, 0x70, 0x74, 0x73, 0x22, 0x3f, 0x0a, 0x10, 0x45, 0x78, 0x70, 0x69, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x2b, 0x0a, 0x03, 0x74, 0x74, 0x6c, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x03, 0x74, 0x74, 0x6c, 0x22, 0xfd, 0x02, 0x0a, 0x0a, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, + 0x6e, 0x66, 0x69, 0x67, 0x12, 0x23, 0x0a, 0x0d, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x65, 0x6e, 0x64, + 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x70, 0x75, 0x73, + 0x68, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x4c, 0x0a, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x41, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0a, 0x61, 0x74, 0x74, + 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x47, 0x0a, 0x0a, 0x6f, 0x69, 0x64, 0x63, 0x5f, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, + 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x4f, 0x69, 0x64, 0x63, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x48, 0x00, 0x52, 0x09, 0x6f, 0x69, 0x64, 0x63, 0x54, 0x6f, 0x6b, 0x65, 0x6e, + 0x1a, 0x5b, 0x0a, 0x09, 0x4f, 0x69, 0x64, 0x63, 
0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x32, 0x0a, + 0x15, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x13, 0x73, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x45, 0x6d, 0x61, 0x69, + 0x6c, 0x12, 0x1a, 0x0a, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x08, 0x61, 0x75, 0x64, 0x69, 0x65, 0x6e, 0x63, 0x65, 0x1a, 0x3d, 0x0a, + 0x0f, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, + 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, + 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x42, 0x17, 0x0a, 0x15, + 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, + 0x65, 0x74, 0x68, 0x6f, 0x64, 0x22, 0x8e, 0x01, 0x0a, 0x0f, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, + 0x65, 0x64, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x15, 0x0a, 0x06, 0x61, 0x63, 0x6b, + 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x61, 0x63, 0x6b, 0x49, 0x64, + 0x12, 0x39, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, 0x4d, 0x65, 0x73, 0x73, 0x61, + 0x67, 0x65, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x29, 0x0a, 0x10, 0x64, + 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x5f, 0x61, 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0f, 0x64, 0x65, 0x6c, 0x69, 0x76, 0x65, 0x72, 0x79, 0x41, + 0x74, 0x74, 0x65, 0x6d, 0x70, 0x74, 0x22, 0x68, 0x0a, 0x16, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0xa6, 0x01, 0x0a, 0x19, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x47, + 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x40, 0x0a, 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, + 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 
0x02, 0x52, 0x0a, 0x75, + 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, 0xa5, 0x01, 0x0a, 0x18, 0x4c, 0x69, + 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, + 0x2b, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, + 0x6e, 0x61, 0x67, 0x65, 0x72, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, + 0x7a, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, + 0x6e, 0x22, 0x89, 0x01, 0x0a, 0x19, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x44, 0x0a, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0d, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, + 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, + 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x6b, 0x0a, + 0x19, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0xad, 0x01, 0x0a, 0x17, 0x4d, + 0x6f, 0x64, 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, + 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x42, 0x0a, 0x0b, 0x70, 0x75, 0x73, 0x68, 0x5f, 0x63, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, + 
0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, + 0x70, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0xbb, 0x01, 0x0a, 0x0b, 0x50, + 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x34, 0x0a, 0x12, 0x72, 0x65, + 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x69, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x6c, 0x79, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x42, 0x05, 0x18, 0x01, 0xe0, 0x41, 0x01, 0x52, 0x11, 0x72, + 0x65, 0x74, 0x75, 0x72, 0x6e, 0x49, 0x6d, 0x6d, 0x65, 0x64, 0x69, 0x61, 0x74, 0x65, 0x6c, 0x79, + 0x12, 0x26, 0x0a, 0x0c, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0b, 0x6d, 0x61, 0x78, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0x5e, 0x0a, 0x0c, 0x50, 0x75, 0x6c, 0x6c, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x11, 0x72, 0x65, 0x63, 0x65, + 0x69, 0x76, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0xbf, 0x01, 0x0a, 0x18, 0x4d, 0x6f, 0x64, + 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, + 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, + 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x61, 0x63, 0x6b, + 0x49, 0x64, 0x73, 0x12, 0x35, 0x0a, 0x14, 0x61, 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x12, 0x61, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, + 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x12, 0x41, + 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, + 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, + 0x69, 0x73, 0x2e, 0x63, 
0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0x1c, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, + 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, 0x73, 0x22, + 0xbd, 0x03, 0x0a, 0x14, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, + 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, + 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x17, 0x0a, 0x07, 0x61, 0x63, 0x6b, 0x5f, + 0x69, 0x64, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x06, 0x61, 0x63, 0x6b, 0x49, 0x64, + 0x73, 0x12, 0x36, 0x0a, 0x17, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x5f, 0x64, 0x65, 0x61, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, + 0x28, 0x05, 0x52, 0x15, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, + 0x6e, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x6f, 0x64, + 0x69, 0x66, 0x79, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x61, 0x63, 0x6b, + 0x5f, 0x69, 0x64, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x14, 0x6d, 0x6f, 0x64, 0x69, + 0x66, 0x79, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x41, 0x63, 0x6b, 0x49, 0x64, 0x73, + 0x12, 0x42, 0x0a, 0x1b, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x5f, 0x61, 0x63, 0x6b, 0x5f, 0x64, + 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x05, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x18, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x53, 0x65, 0x63, + 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x1b, 0x0a, 0x09, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x49, + 0x64, 0x12, 0x38, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, + 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x07, 0x20, + 0x01, 0x28, 0x03, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x4f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, + 0x69, 0x6e, 0x67, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x32, 0x0a, 0x15, 0x6d, + 0x61, 0x78, 0x5f, 0x6f, 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x5f, 0x62, + 0x79, 0x74, 0x65, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x13, 0x6d, 0x61, 0x78, 0x4f, + 0x75, 0x74, 0x73, 0x74, 0x61, 0x6e, 0x64, 0x69, 0x6e, 0x67, 0x42, 0x79, 0x74, 0x65, 0x73, 0x22, + 0x67, 0x0a, 0x15, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4e, 0x0a, 0x11, 0x72, 0x65, 0x63, 0x65, + 0x69, 0x76, 0x65, 0x64, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x52, 
0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x4d, + 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x52, 0x10, 0x72, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, + 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x22, 0xab, 0x02, 0x0a, 0x15, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x4e, + 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, + 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4b, + 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x33, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, + 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x96, 0x01, 0x0a, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x3b, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x40, 0x0a, + 0x0b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, + 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x22, + 0xdc, 0x02, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x36, 0x0a, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, + 0x20, 0xfa, 0x41, 0x1d, 0x0a, 0x1b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x54, 0x6f, 0x70, 0x69, + 0x63, 0x52, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x3b, 0x0a, 0x0b, 
0x65, 0x78, 0x70, 0x69, + 0x72, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x78, 0x70, 0x69, 0x72, + 0x65, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x3e, 0x0a, 0x06, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x18, + 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x73, 0x45, + 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, + 0x3a, 0x4c, 0xea, 0x41, 0x49, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x73, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x7d, 0x22, 0x58, + 0x0a, 0x12, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x42, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, + 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x08, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0xa1, 0x01, 0x0a, 0x14, 0x4c, 0x69, 0x73, + 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x4d, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x33, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x2d, 0x0a, 0x2b, 0x63, 0x6c, 0x6f, 0x75, + 0x64, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, + 0x50, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x12, 0x1b, 0x0a, 0x09, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x05, 0x52, 0x08, 0x70, 0x61, 0x67, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x1d, 0x0a, + 0x0a, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x09, 0x70, 0x61, 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x79, 0x0a, 0x15, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, + 
0x73, 0x68, 0x6f, 0x74, 0x52, 0x09, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, + 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, + 0x67, 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x42, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x22, 0xdc, 0x01, 0x0a, 0x0b, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x4e, 0x0a, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, + 0x41, 0x24, 0x0a, 0x22, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x30, 0x0a, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x48, 0x00, + 0x52, 0x04, 0x74, 0x69, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x48, 0x00, 0x52, + 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x42, 0x08, 0x0a, 0x06, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x22, 0x0e, 0x0a, 0x0c, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x32, 0xa3, 0x0b, 0x0a, 0x09, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x65, + 0x72, 0x12, 0x71, 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x12, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x1a, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x1a, 0x1e, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x7d, 0x0a, 0x0b, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x6f, + 0x70, 0x69, 0x63, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 
0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, + 0x69, 0x63, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x32, 0x24, 0x2f, 0x76, 0x31, 0x2f, + 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, + 0x3a, 0x01, 0x2a, 0x12, 0x93, 0x01, 0x0a, 0x07, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x12, + 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x73, 0x68, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x43, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x22, 0x27, 0x2f, 0x76, + 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x70, 0x75, + 0x62, 0x6c, 0x69, 0x73, 0x68, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0e, 0x74, 0x6f, 0x70, 0x69, 0x63, + 0x2c, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x73, 0x12, 0x77, 0x0a, 0x08, 0x47, 0x65, 0x74, + 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x6f, 0x70, 0x69, + 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x6f, 0x70, 0x69, + 0x63, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x05, 0x74, 0x6f, 0x70, + 0x69, 0x63, 0x12, 0x8a, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x73, 0x12, 0x23, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, + 0x70, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x31, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, + 0x6f, 0x70, 0x69, 0x63, 0x73, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0xba, 0x01, 0x0a, 0x16, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, + 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 
0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, + 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, + 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0xda, 0x41, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0xaa, 0x01, 0x0a, + 0x12, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x53, 0x6e, 0x61, + 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x39, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2b, 0x12, 0x29, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, + 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, + 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x73, 0xda, 0x41, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x7c, 0x0a, 0x0b, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, + 0x74, 0x65, 0x54, 0x6f, 0x70, 0x69, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x2a, 0x1f, + 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x73, 0x2f, 0x2a, 0x7d, 0xda, + 0x41, 0x05, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x12, 0xad, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x74, 0x61, + 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x2e, 0x44, 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x65, 0x74, 0x61, 0x63, 0x68, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3c, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x36, 0x22, 0x34, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, + 0x3a, 0x64, 0x65, 0x74, 0x61, 0x63, 0x68, 0x1a, 0x70, 0xca, 0x41, 0x15, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 
0x73, 0x2e, 0x63, 0x6f, + 0x6d, 0xd2, 0x41, 0x55, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, + 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, + 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, + 0x74, 0x68, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x32, 0x83, 0x15, 0x0a, 0x0a, 0x53, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x62, 0x65, 0x72, 0x12, 0xb4, 0x01, 0x0a, 0x12, 0x43, 0x72, 0x65, + 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x1a, + 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, + 0x5e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x1a, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, + 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x01, + 0x2a, 0xda, 0x41, 0x2b, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x74, 0x6f, 0x70, 0x69, 0x63, 0x2c, 0x70, + 0x75, 0x73, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x64, + 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, + 0xa1, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x44, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x12, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0xa0, 0x01, 0x0a, 0x12, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, + 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x3d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x32, + 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, + 
0x6f, 0x6e, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, + 0x2f, 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0x12, 0xa6, 0x01, 0x0a, 0x11, 0x4c, 0x69, 0x73, 0x74, 0x53, + 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x2a, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x38, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x28, 0x12, 0x26, 0x2f, + 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x9f, 0x01, 0x0a, 0x12, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x53, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x44, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x2f, 0x2a, 0x2d, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x2a, 0x7d, 0xda, 0x41, 0x0c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x12, 0xcf, 0x01, 0x0a, 0x11, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, + 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, + 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x76, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x44, 0x22, 0x3f, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x41, 0x63, 0x6b, 0x44, 0x65, 0x61, 0x64, + 0x6c, 0x69, 0x6e, 0x65, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x29, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x2c, 0x61, + 0x63, 0x6b, 0x5f, 0x64, 0x65, 0x61, 0x64, 0x6c, 0x69, 0x6e, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, + 0x6e, 0x64, 0x73, 0x12, 
0xa8, 0x01, 0x0a, 0x0b, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, + 0x64, 0x67, 0x65, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, + 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, 0x65, 0x64, + 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x22, 0x5b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x22, 0x39, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x61, 0x63, 0x6b, 0x6e, 0x6f, 0x77, 0x6c, + 0x65, 0x64, 0x67, 0x65, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x14, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x61, 0x63, 0x6b, 0x5f, 0x69, 0x64, 0x73, 0x12, 0xb3, + 0x01, 0x0a, 0x04, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x6c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, + 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x70, 0x75, + 0x6c, 0x6c, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x2c, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x5f, 0x69, 0x6d, 0x6d, 0x65, + 0x64, 0x69, 0x61, 0x74, 0x65, 0x6c, 0x79, 0x2c, 0x6d, 0x61, 0x78, 0x5f, 0x6d, 0x65, 0x73, 0x73, + 0x61, 0x67, 0x65, 0x73, 0x12, 0x66, 0x0a, 0x0d, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, + 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, + 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x27, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, + 0x2e, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x69, 0x6e, 0x67, 0x50, 0x75, 0x6c, 0x6c, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x28, 0x01, 0x30, 0x01, 0x12, 0xbb, 0x01, 0x0a, + 0x10, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, + 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, + 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x22, 0x64, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x43, 0x22, 0x3e, 0x2f, 0x76, + 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 
0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, + 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, + 0x66, 0x79, 0x50, 0x75, 0x73, 0x68, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x3a, 0x01, 0x2a, 0xda, + 0x41, 0x18, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x2c, 0x70, + 0x75, 0x73, 0x68, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, 0x89, 0x01, 0x0a, 0x0b, 0x47, + 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x24, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, + 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x38, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x27, 0x12, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x08, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x96, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, 0x26, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x2e, 0x76, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x34, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x24, 0x12, 0x22, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x73, 0xda, 0x41, 0x07, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x12, + 0x97, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, + 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, + 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x40, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x1a, + 0x21, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x11, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x8c, 0x01, 0x0a, 0x0e, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x22, 0x35, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x32, 0x2a, 0x2f, 0x76, 0x31, 0x2f, 0x7b, + 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, + 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x27, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x38, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x27, 0x2a, 0x25, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x08, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x84, 0x01, 0x0a, 0x04, 0x53, 0x65, 0x65, 0x6b, 0x12, + 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, + 0x31, 0x2e, 0x53, 0x65, 0x65, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x31, 0x2f, 0x7b, 0x73, 0x75, 0x62, + 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x75, 0x62, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, + 0x6e, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x65, 0x6b, 0x3a, 0x01, 0x2a, 0x1a, 0x70, 0xca, + 0x41, 0x15, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, + 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0x55, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0x42, + 0xae, 0x01, 0x0a, 0x14, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x50, 0x75, 0x62, 0x73, 0x75, 0x62, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x36, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x70, + 0x75, 0x62, 0x73, 0x75, 0x62, 0x2f, 0x76, 0x31, 0x3b, 0x70, 0x75, 0x62, 0x73, 0x75, 0x62, 0xf8, + 0x01, 0x01, 0xaa, 0x02, 0x16, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, + 
0x64, 0x2e, 0x50, 0x75, 0x62, 0x53, 0x75, 0x62, 0x2e, 0x56, 0x31, 0xca, 0x02, 0x16, 0x47, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x50, 0x75, 0x62, 0x53, 0x75, + 0x62, 0x5c, 0x56, 0x31, 0xea, 0x02, 0x19, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, + 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x50, 0x75, 0x62, 0x53, 0x75, 0x62, 0x3a, 0x3a, 0x56, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_pubsub_v1_pubsub_proto_rawDescOnce sync.Once + file_google_pubsub_v1_pubsub_proto_rawDescData = file_google_pubsub_v1_pubsub_proto_rawDesc +) + +func file_google_pubsub_v1_pubsub_proto_rawDescGZIP() []byte { + file_google_pubsub_v1_pubsub_proto_rawDescOnce.Do(func() { + file_google_pubsub_v1_pubsub_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_pubsub_v1_pubsub_proto_rawDescData) + }) + return file_google_pubsub_v1_pubsub_proto_rawDescData +} + +var file_google_pubsub_v1_pubsub_proto_msgTypes = make([]protoimpl.MessageInfo, 50) +var file_google_pubsub_v1_pubsub_proto_goTypes = []interface{}{ + (*MessageStoragePolicy)(nil), // 0: google.pubsub.v1.MessageStoragePolicy + (*Topic)(nil), // 1: google.pubsub.v1.Topic + (*PubsubMessage)(nil), // 2: google.pubsub.v1.PubsubMessage + (*GetTopicRequest)(nil), // 3: google.pubsub.v1.GetTopicRequest + (*UpdateTopicRequest)(nil), // 4: google.pubsub.v1.UpdateTopicRequest + (*PublishRequest)(nil), // 5: google.pubsub.v1.PublishRequest + (*PublishResponse)(nil), // 6: google.pubsub.v1.PublishResponse + (*ListTopicsRequest)(nil), // 7: google.pubsub.v1.ListTopicsRequest + (*ListTopicsResponse)(nil), // 8: google.pubsub.v1.ListTopicsResponse + (*ListTopicSubscriptionsRequest)(nil), // 9: google.pubsub.v1.ListTopicSubscriptionsRequest + (*ListTopicSubscriptionsResponse)(nil), // 10: google.pubsub.v1.ListTopicSubscriptionsResponse + (*ListTopicSnapshotsRequest)(nil), // 11: google.pubsub.v1.ListTopicSnapshotsRequest + (*ListTopicSnapshotsResponse)(nil), // 12: google.pubsub.v1.ListTopicSnapshotsResponse + (*DeleteTopicRequest)(nil), // 13: google.pubsub.v1.DeleteTopicRequest + (*DetachSubscriptionRequest)(nil), // 14: google.pubsub.v1.DetachSubscriptionRequest + (*DetachSubscriptionResponse)(nil), // 15: google.pubsub.v1.DetachSubscriptionResponse + (*Subscription)(nil), // 16: google.pubsub.v1.Subscription + (*RetryPolicy)(nil), // 17: google.pubsub.v1.RetryPolicy + (*DeadLetterPolicy)(nil), // 18: google.pubsub.v1.DeadLetterPolicy + (*ExpirationPolicy)(nil), // 19: google.pubsub.v1.ExpirationPolicy + (*PushConfig)(nil), // 20: google.pubsub.v1.PushConfig + (*ReceivedMessage)(nil), // 21: google.pubsub.v1.ReceivedMessage + (*GetSubscriptionRequest)(nil), // 22: google.pubsub.v1.GetSubscriptionRequest + (*UpdateSubscriptionRequest)(nil), // 23: google.pubsub.v1.UpdateSubscriptionRequest + (*ListSubscriptionsRequest)(nil), // 24: google.pubsub.v1.ListSubscriptionsRequest + (*ListSubscriptionsResponse)(nil), // 25: google.pubsub.v1.ListSubscriptionsResponse + (*DeleteSubscriptionRequest)(nil), // 26: google.pubsub.v1.DeleteSubscriptionRequest + (*ModifyPushConfigRequest)(nil), // 27: google.pubsub.v1.ModifyPushConfigRequest + (*PullRequest)(nil), // 28: google.pubsub.v1.PullRequest + (*PullResponse)(nil), // 29: google.pubsub.v1.PullResponse + (*ModifyAckDeadlineRequest)(nil), // 30: google.pubsub.v1.ModifyAckDeadlineRequest + (*AcknowledgeRequest)(nil), // 31: google.pubsub.v1.AcknowledgeRequest + (*StreamingPullRequest)(nil), // 32: google.pubsub.v1.StreamingPullRequest + 
(*StreamingPullResponse)(nil), // 33: google.pubsub.v1.StreamingPullResponse + (*CreateSnapshotRequest)(nil), // 34: google.pubsub.v1.CreateSnapshotRequest + (*UpdateSnapshotRequest)(nil), // 35: google.pubsub.v1.UpdateSnapshotRequest + (*Snapshot)(nil), // 36: google.pubsub.v1.Snapshot + (*GetSnapshotRequest)(nil), // 37: google.pubsub.v1.GetSnapshotRequest + (*ListSnapshotsRequest)(nil), // 38: google.pubsub.v1.ListSnapshotsRequest + (*ListSnapshotsResponse)(nil), // 39: google.pubsub.v1.ListSnapshotsResponse + (*DeleteSnapshotRequest)(nil), // 40: google.pubsub.v1.DeleteSnapshotRequest + (*SeekRequest)(nil), // 41: google.pubsub.v1.SeekRequest + (*SeekResponse)(nil), // 42: google.pubsub.v1.SeekResponse + nil, // 43: google.pubsub.v1.Topic.LabelsEntry + nil, // 44: google.pubsub.v1.PubsubMessage.AttributesEntry + nil, // 45: google.pubsub.v1.Subscription.LabelsEntry + (*PushConfig_OidcToken)(nil), // 46: google.pubsub.v1.PushConfig.OidcToken + nil, // 47: google.pubsub.v1.PushConfig.AttributesEntry + nil, // 48: google.pubsub.v1.CreateSnapshotRequest.LabelsEntry + nil, // 49: google.pubsub.v1.Snapshot.LabelsEntry + (*timestamppb.Timestamp)(nil), // 50: google.protobuf.Timestamp + (*fieldmaskpb.FieldMask)(nil), // 51: google.protobuf.FieldMask + (*durationpb.Duration)(nil), // 52: google.protobuf.Duration + (*emptypb.Empty)(nil), // 53: google.protobuf.Empty +} +var file_google_pubsub_v1_pubsub_proto_depIdxs = []int32{ + 43, // 0: google.pubsub.v1.Topic.labels:type_name -> google.pubsub.v1.Topic.LabelsEntry + 0, // 1: google.pubsub.v1.Topic.message_storage_policy:type_name -> google.pubsub.v1.MessageStoragePolicy + 44, // 2: google.pubsub.v1.PubsubMessage.attributes:type_name -> google.pubsub.v1.PubsubMessage.AttributesEntry + 50, // 3: google.pubsub.v1.PubsubMessage.publish_time:type_name -> google.protobuf.Timestamp + 1, // 4: google.pubsub.v1.UpdateTopicRequest.topic:type_name -> google.pubsub.v1.Topic + 51, // 5: google.pubsub.v1.UpdateTopicRequest.update_mask:type_name -> google.protobuf.FieldMask + 2, // 6: google.pubsub.v1.PublishRequest.messages:type_name -> google.pubsub.v1.PubsubMessage + 1, // 7: google.pubsub.v1.ListTopicsResponse.topics:type_name -> google.pubsub.v1.Topic + 20, // 8: google.pubsub.v1.Subscription.push_config:type_name -> google.pubsub.v1.PushConfig + 52, // 9: google.pubsub.v1.Subscription.message_retention_duration:type_name -> google.protobuf.Duration + 45, // 10: google.pubsub.v1.Subscription.labels:type_name -> google.pubsub.v1.Subscription.LabelsEntry + 19, // 11: google.pubsub.v1.Subscription.expiration_policy:type_name -> google.pubsub.v1.ExpirationPolicy + 18, // 12: google.pubsub.v1.Subscription.dead_letter_policy:type_name -> google.pubsub.v1.DeadLetterPolicy + 17, // 13: google.pubsub.v1.Subscription.retry_policy:type_name -> google.pubsub.v1.RetryPolicy + 52, // 14: google.pubsub.v1.RetryPolicy.minimum_backoff:type_name -> google.protobuf.Duration + 52, // 15: google.pubsub.v1.RetryPolicy.maximum_backoff:type_name -> google.protobuf.Duration + 52, // 16: google.pubsub.v1.ExpirationPolicy.ttl:type_name -> google.protobuf.Duration + 47, // 17: google.pubsub.v1.PushConfig.attributes:type_name -> google.pubsub.v1.PushConfig.AttributesEntry + 46, // 18: google.pubsub.v1.PushConfig.oidc_token:type_name -> google.pubsub.v1.PushConfig.OidcToken + 2, // 19: google.pubsub.v1.ReceivedMessage.message:type_name -> google.pubsub.v1.PubsubMessage + 16, // 20: google.pubsub.v1.UpdateSubscriptionRequest.subscription:type_name -> google.pubsub.v1.Subscription + 
51, // 21: google.pubsub.v1.UpdateSubscriptionRequest.update_mask:type_name -> google.protobuf.FieldMask + 16, // 22: google.pubsub.v1.ListSubscriptionsResponse.subscriptions:type_name -> google.pubsub.v1.Subscription + 20, // 23: google.pubsub.v1.ModifyPushConfigRequest.push_config:type_name -> google.pubsub.v1.PushConfig + 21, // 24: google.pubsub.v1.PullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage + 21, // 25: google.pubsub.v1.StreamingPullResponse.received_messages:type_name -> google.pubsub.v1.ReceivedMessage + 48, // 26: google.pubsub.v1.CreateSnapshotRequest.labels:type_name -> google.pubsub.v1.CreateSnapshotRequest.LabelsEntry + 36, // 27: google.pubsub.v1.UpdateSnapshotRequest.snapshot:type_name -> google.pubsub.v1.Snapshot + 51, // 28: google.pubsub.v1.UpdateSnapshotRequest.update_mask:type_name -> google.protobuf.FieldMask + 50, // 29: google.pubsub.v1.Snapshot.expire_time:type_name -> google.protobuf.Timestamp + 49, // 30: google.pubsub.v1.Snapshot.labels:type_name -> google.pubsub.v1.Snapshot.LabelsEntry + 36, // 31: google.pubsub.v1.ListSnapshotsResponse.snapshots:type_name -> google.pubsub.v1.Snapshot + 50, // 32: google.pubsub.v1.SeekRequest.time:type_name -> google.protobuf.Timestamp + 1, // 33: google.pubsub.v1.Publisher.CreateTopic:input_type -> google.pubsub.v1.Topic + 4, // 34: google.pubsub.v1.Publisher.UpdateTopic:input_type -> google.pubsub.v1.UpdateTopicRequest + 5, // 35: google.pubsub.v1.Publisher.Publish:input_type -> google.pubsub.v1.PublishRequest + 3, // 36: google.pubsub.v1.Publisher.GetTopic:input_type -> google.pubsub.v1.GetTopicRequest + 7, // 37: google.pubsub.v1.Publisher.ListTopics:input_type -> google.pubsub.v1.ListTopicsRequest + 9, // 38: google.pubsub.v1.Publisher.ListTopicSubscriptions:input_type -> google.pubsub.v1.ListTopicSubscriptionsRequest + 11, // 39: google.pubsub.v1.Publisher.ListTopicSnapshots:input_type -> google.pubsub.v1.ListTopicSnapshotsRequest + 13, // 40: google.pubsub.v1.Publisher.DeleteTopic:input_type -> google.pubsub.v1.DeleteTopicRequest + 14, // 41: google.pubsub.v1.Publisher.DetachSubscription:input_type -> google.pubsub.v1.DetachSubscriptionRequest + 16, // 42: google.pubsub.v1.Subscriber.CreateSubscription:input_type -> google.pubsub.v1.Subscription + 22, // 43: google.pubsub.v1.Subscriber.GetSubscription:input_type -> google.pubsub.v1.GetSubscriptionRequest + 23, // 44: google.pubsub.v1.Subscriber.UpdateSubscription:input_type -> google.pubsub.v1.UpdateSubscriptionRequest + 24, // 45: google.pubsub.v1.Subscriber.ListSubscriptions:input_type -> google.pubsub.v1.ListSubscriptionsRequest + 26, // 46: google.pubsub.v1.Subscriber.DeleteSubscription:input_type -> google.pubsub.v1.DeleteSubscriptionRequest + 30, // 47: google.pubsub.v1.Subscriber.ModifyAckDeadline:input_type -> google.pubsub.v1.ModifyAckDeadlineRequest + 31, // 48: google.pubsub.v1.Subscriber.Acknowledge:input_type -> google.pubsub.v1.AcknowledgeRequest + 28, // 49: google.pubsub.v1.Subscriber.Pull:input_type -> google.pubsub.v1.PullRequest + 32, // 50: google.pubsub.v1.Subscriber.StreamingPull:input_type -> google.pubsub.v1.StreamingPullRequest + 27, // 51: google.pubsub.v1.Subscriber.ModifyPushConfig:input_type -> google.pubsub.v1.ModifyPushConfigRequest + 37, // 52: google.pubsub.v1.Subscriber.GetSnapshot:input_type -> google.pubsub.v1.GetSnapshotRequest + 38, // 53: google.pubsub.v1.Subscriber.ListSnapshots:input_type -> google.pubsub.v1.ListSnapshotsRequest + 34, // 54: google.pubsub.v1.Subscriber.CreateSnapshot:input_type -> 
google.pubsub.v1.CreateSnapshotRequest + 35, // 55: google.pubsub.v1.Subscriber.UpdateSnapshot:input_type -> google.pubsub.v1.UpdateSnapshotRequest + 40, // 56: google.pubsub.v1.Subscriber.DeleteSnapshot:input_type -> google.pubsub.v1.DeleteSnapshotRequest + 41, // 57: google.pubsub.v1.Subscriber.Seek:input_type -> google.pubsub.v1.SeekRequest + 1, // 58: google.pubsub.v1.Publisher.CreateTopic:output_type -> google.pubsub.v1.Topic + 1, // 59: google.pubsub.v1.Publisher.UpdateTopic:output_type -> google.pubsub.v1.Topic + 6, // 60: google.pubsub.v1.Publisher.Publish:output_type -> google.pubsub.v1.PublishResponse + 1, // 61: google.pubsub.v1.Publisher.GetTopic:output_type -> google.pubsub.v1.Topic + 8, // 62: google.pubsub.v1.Publisher.ListTopics:output_type -> google.pubsub.v1.ListTopicsResponse + 10, // 63: google.pubsub.v1.Publisher.ListTopicSubscriptions:output_type -> google.pubsub.v1.ListTopicSubscriptionsResponse + 12, // 64: google.pubsub.v1.Publisher.ListTopicSnapshots:output_type -> google.pubsub.v1.ListTopicSnapshotsResponse + 53, // 65: google.pubsub.v1.Publisher.DeleteTopic:output_type -> google.protobuf.Empty + 15, // 66: google.pubsub.v1.Publisher.DetachSubscription:output_type -> google.pubsub.v1.DetachSubscriptionResponse + 16, // 67: google.pubsub.v1.Subscriber.CreateSubscription:output_type -> google.pubsub.v1.Subscription + 16, // 68: google.pubsub.v1.Subscriber.GetSubscription:output_type -> google.pubsub.v1.Subscription + 16, // 69: google.pubsub.v1.Subscriber.UpdateSubscription:output_type -> google.pubsub.v1.Subscription + 25, // 70: google.pubsub.v1.Subscriber.ListSubscriptions:output_type -> google.pubsub.v1.ListSubscriptionsResponse + 53, // 71: google.pubsub.v1.Subscriber.DeleteSubscription:output_type -> google.protobuf.Empty + 53, // 72: google.pubsub.v1.Subscriber.ModifyAckDeadline:output_type -> google.protobuf.Empty + 53, // 73: google.pubsub.v1.Subscriber.Acknowledge:output_type -> google.protobuf.Empty + 29, // 74: google.pubsub.v1.Subscriber.Pull:output_type -> google.pubsub.v1.PullResponse + 33, // 75: google.pubsub.v1.Subscriber.StreamingPull:output_type -> google.pubsub.v1.StreamingPullResponse + 53, // 76: google.pubsub.v1.Subscriber.ModifyPushConfig:output_type -> google.protobuf.Empty + 36, // 77: google.pubsub.v1.Subscriber.GetSnapshot:output_type -> google.pubsub.v1.Snapshot + 39, // 78: google.pubsub.v1.Subscriber.ListSnapshots:output_type -> google.pubsub.v1.ListSnapshotsResponse + 36, // 79: google.pubsub.v1.Subscriber.CreateSnapshot:output_type -> google.pubsub.v1.Snapshot + 36, // 80: google.pubsub.v1.Subscriber.UpdateSnapshot:output_type -> google.pubsub.v1.Snapshot + 53, // 81: google.pubsub.v1.Subscriber.DeleteSnapshot:output_type -> google.protobuf.Empty + 42, // 82: google.pubsub.v1.Subscriber.Seek:output_type -> google.pubsub.v1.SeekResponse + 58, // [58:83] is the sub-list for method output_type + 33, // [33:58] is the sub-list for method input_type + 33, // [33:33] is the sub-list for extension type_name + 33, // [33:33] is the sub-list for extension extendee + 0, // [0:33] is the sub-list for field type_name +} + +func init() { file_google_pubsub_v1_pubsub_proto_init() } +func file_google_pubsub_v1_pubsub_proto_init() { + if File_google_pubsub_v1_pubsub_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_pubsub_v1_pubsub_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MessageStoragePolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Topic); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PubsubMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetTopicRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateTopicRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PublishResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListTopicsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListTopicsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListTopicSubscriptionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListTopicSubscriptionsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListTopicSnapshotsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListTopicSnapshotsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteTopicRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return 
&v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DetachSubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DetachSubscriptionResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Subscription); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RetryPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeadLetterPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExpirationPolicy); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ReceivedMessage); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateSubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListSubscriptionsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListSubscriptionsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteSubscriptionRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 
2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ModifyPushConfigRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PullRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PullResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ModifyAckDeadlineRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AcknowledgeRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamingPullRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StreamingPullResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*CreateSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*UpdateSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Snapshot); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*GetSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListSnapshotsRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ListSnapshotsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 
2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DeleteSnapshotRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SeekRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SeekResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PushConfig_OidcToken); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_google_pubsub_v1_pubsub_proto_msgTypes[20].OneofWrappers = []interface{}{ + (*PushConfig_OidcToken_)(nil), + } + file_google_pubsub_v1_pubsub_proto_msgTypes[41].OneofWrappers = []interface{}{ + (*SeekRequest_Time)(nil), + (*SeekRequest_Snapshot)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_pubsub_v1_pubsub_proto_rawDesc, + NumEnums: 0, + NumMessages: 50, + NumExtensions: 0, + NumServices: 2, + }, + GoTypes: file_google_pubsub_v1_pubsub_proto_goTypes, + DependencyIndexes: file_google_pubsub_v1_pubsub_proto_depIdxs, + MessageInfos: file_google_pubsub_v1_pubsub_proto_msgTypes, + }.Build() + File_google_pubsub_v1_pubsub_proto = out.File + file_google_pubsub_v1_pubsub_proto_rawDesc = nil + file_google_pubsub_v1_pubsub_proto_goTypes = nil + file_google_pubsub_v1_pubsub_proto_depIdxs = nil +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// PublisherClient is the client API for Publisher service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type PublisherClient interface { + // Creates the given topic with the given name. See the [resource name rules]( + // https://cloud.google.com/pubsub/docs/admin#resource_names). + CreateTopic(ctx context.Context, in *Topic, opts ...grpc.CallOption) (*Topic, error) + // Updates an existing topic. Note that certain properties of a + // topic are not modifiable. + UpdateTopic(ctx context.Context, in *UpdateTopicRequest, opts ...grpc.CallOption) (*Topic, error) + // Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic + // does not exist. + Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*PublishResponse, error) + // Gets the configuration of a topic. + GetTopic(ctx context.Context, in *GetTopicRequest, opts ...grpc.CallOption) (*Topic, error) + // Lists matching topics. 
+ ListTopics(ctx context.Context, in *ListTopicsRequest, opts ...grpc.CallOption) (*ListTopicsResponse, error) + // Lists the names of the attached subscriptions on this topic. + ListTopicSubscriptions(ctx context.Context, in *ListTopicSubscriptionsRequest, opts ...grpc.CallOption) (*ListTopicSubscriptionsResponse, error) + // Lists the names of the snapshots on this topic. Snapshots are used in + // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, + // which allow you to manage message acknowledgments in bulk. That is, you can + // set the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + ListTopicSnapshots(ctx context.Context, in *ListTopicSnapshotsRequest, opts ...grpc.CallOption) (*ListTopicSnapshotsResponse, error) + // Deletes the topic with the given name. Returns `NOT_FOUND` if the topic + // does not exist. After a topic is deleted, a new topic may be created with + // the same name; this is an entirely new topic with none of the old + // configuration or subscriptions. Existing subscriptions to this topic are + // not deleted, but their `topic` field is set to `_deleted-topic_`. + DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Detaches a subscription from this topic. All messages retained in the + // subscription are dropped. Subsequent `Pull` and `StreamingPull` requests + // will return FAILED_PRECONDITION. If the subscription is a push + // subscription, pushes to the endpoint will stop. + DetachSubscription(ctx context.Context, in *DetachSubscriptionRequest, opts ...grpc.CallOption) (*DetachSubscriptionResponse, error) +} + +type publisherClient struct { + cc grpc.ClientConnInterface +} + +func NewPublisherClient(cc grpc.ClientConnInterface) PublisherClient { + return &publisherClient{cc} +} + +func (c *publisherClient) CreateTopic(ctx context.Context, in *Topic, opts ...grpc.CallOption) (*Topic, error) { + out := new(Topic) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/CreateTopic", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) UpdateTopic(ctx context.Context, in *UpdateTopicRequest, opts ...grpc.CallOption) (*Topic, error) { + out := new(Topic) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/UpdateTopic", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) Publish(ctx context.Context, in *PublishRequest, opts ...grpc.CallOption) (*PublishResponse, error) { + out := new(PublishResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/Publish", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) GetTopic(ctx context.Context, in *GetTopicRequest, opts ...grpc.CallOption) (*Topic, error) { + out := new(Topic) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/GetTopic", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) ListTopics(ctx context.Context, in *ListTopicsRequest, opts ...grpc.CallOption) (*ListTopicsResponse, error) { + out := new(ListTopicsResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/ListTopics", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) ListTopicSubscriptions(ctx context.Context, in *ListTopicSubscriptionsRequest, opts ...grpc.CallOption) (*ListTopicSubscriptionsResponse, error) { + out := new(ListTopicSubscriptionsResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/ListTopicSubscriptions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) ListTopicSnapshots(ctx context.Context, in *ListTopicSnapshotsRequest, opts ...grpc.CallOption) (*ListTopicSnapshotsResponse, error) { + out := new(ListTopicSnapshotsResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/ListTopicSnapshots", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) DeleteTopic(ctx context.Context, in *DeleteTopicRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/DeleteTopic", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *publisherClient) DetachSubscription(ctx context.Context, in *DetachSubscriptionRequest, opts ...grpc.CallOption) (*DetachSubscriptionResponse, error) { + out := new(DetachSubscriptionResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Publisher/DetachSubscription", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// PublisherServer is the server API for Publisher service. +type PublisherServer interface { + // Creates the given topic with the given name. See the [resource name rules]( + // https://cloud.google.com/pubsub/docs/admin#resource_names). + CreateTopic(context.Context, *Topic) (*Topic, error) + // Updates an existing topic. Note that certain properties of a + // topic are not modifiable. + UpdateTopic(context.Context, *UpdateTopicRequest) (*Topic, error) + // Adds one or more messages to the topic. Returns `NOT_FOUND` if the topic + // does not exist. + Publish(context.Context, *PublishRequest) (*PublishResponse, error) + // Gets the configuration of a topic. + GetTopic(context.Context, *GetTopicRequest) (*Topic, error) + // Lists matching topics. + ListTopics(context.Context, *ListTopicsRequest) (*ListTopicsResponse, error) + // Lists the names of the attached subscriptions on this topic. + ListTopicSubscriptions(context.Context, *ListTopicSubscriptionsRequest) (*ListTopicSubscriptionsResponse, error) + // Lists the names of the snapshots on this topic. Snapshots are used in + // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, + // which allow you to manage message acknowledgments in bulk. That is, you can + // set the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + ListTopicSnapshots(context.Context, *ListTopicSnapshotsRequest) (*ListTopicSnapshotsResponse, error) + // Deletes the topic with the given name. Returns `NOT_FOUND` if the topic + // does not exist. After a topic is deleted, a new topic may be created with + // the same name; this is an entirely new topic with none of the old + // configuration or subscriptions. Existing subscriptions to this topic are + // not deleted, but their `topic` field is set to `_deleted-topic_`. + DeleteTopic(context.Context, *DeleteTopicRequest) (*emptypb.Empty, error) + // Detaches a subscription from this topic. All messages retained in the + // subscription are dropped. 
Subsequent `Pull` and `StreamingPull` requests + // will return FAILED_PRECONDITION. If the subscription is a push + // subscription, pushes to the endpoint will stop. + DetachSubscription(context.Context, *DetachSubscriptionRequest) (*DetachSubscriptionResponse, error) +} + +// UnimplementedPublisherServer can be embedded to have forward compatible implementations. +type UnimplementedPublisherServer struct { +} + +func (*UnimplementedPublisherServer) CreateTopic(context.Context, *Topic) (*Topic, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateTopic not implemented") +} +func (*UnimplementedPublisherServer) UpdateTopic(context.Context, *UpdateTopicRequest) (*Topic, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateTopic not implemented") +} +func (*UnimplementedPublisherServer) Publish(context.Context, *PublishRequest) (*PublishResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Publish not implemented") +} +func (*UnimplementedPublisherServer) GetTopic(context.Context, *GetTopicRequest) (*Topic, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetTopic not implemented") +} +func (*UnimplementedPublisherServer) ListTopics(context.Context, *ListTopicsRequest) (*ListTopicsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListTopics not implemented") +} +func (*UnimplementedPublisherServer) ListTopicSubscriptions(context.Context, *ListTopicSubscriptionsRequest) (*ListTopicSubscriptionsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListTopicSubscriptions not implemented") +} +func (*UnimplementedPublisherServer) ListTopicSnapshots(context.Context, *ListTopicSnapshotsRequest) (*ListTopicSnapshotsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListTopicSnapshots not implemented") +} +func (*UnimplementedPublisherServer) DeleteTopic(context.Context, *DeleteTopicRequest) (*emptypb.Empty, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteTopic not implemented") +} +func (*UnimplementedPublisherServer) DetachSubscription(context.Context, *DetachSubscriptionRequest) (*DetachSubscriptionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DetachSubscription not implemented") +} + +func RegisterPublisherServer(s *grpc.Server, srv PublisherServer) { + s.RegisterService(&_Publisher_serviceDesc, srv) +} + +func _Publisher_CreateTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Topic) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).CreateTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/CreateTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).CreateTopic(ctx, req.(*Topic)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_UpdateTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).UpdateTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/UpdateTopic", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).UpdateTopic(ctx, req.(*UpdateTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_Publish_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PublishRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).Publish(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/Publish", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).Publish(ctx, req.(*PublishRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_GetTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).GetTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/GetTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).GetTopic(ctx, req.(*GetTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_ListTopics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTopicsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).ListTopics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/ListTopics", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).ListTopics(ctx, req.(*ListTopicsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_ListTopicSubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTopicSubscriptionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).ListTopicSubscriptions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/ListTopicSubscriptions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).ListTopicSubscriptions(ctx, req.(*ListTopicSubscriptionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_ListTopicSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListTopicSnapshotsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).ListTopicSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/ListTopicSnapshots", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).ListTopicSnapshots(ctx, req.(*ListTopicSnapshotsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Publisher_DeleteTopic_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteTopicRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).DeleteTopic(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/DeleteTopic", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).DeleteTopic(ctx, req.(*DeleteTopicRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Publisher_DetachSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DetachSubscriptionRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PublisherServer).DetachSubscription(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.pubsub.v1.Publisher/DetachSubscription", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PublisherServer).DetachSubscription(ctx, req.(*DetachSubscriptionRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Publisher_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.pubsub.v1.Publisher", + HandlerType: (*PublisherServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateTopic", + Handler: _Publisher_CreateTopic_Handler, + }, + { + MethodName: "UpdateTopic", + Handler: _Publisher_UpdateTopic_Handler, + }, + { + MethodName: "Publish", + Handler: _Publisher_Publish_Handler, + }, + { + MethodName: "GetTopic", + Handler: _Publisher_GetTopic_Handler, + }, + { + MethodName: "ListTopics", + Handler: _Publisher_ListTopics_Handler, + }, + { + MethodName: "ListTopicSubscriptions", + Handler: _Publisher_ListTopicSubscriptions_Handler, + }, + { + MethodName: "ListTopicSnapshots", + Handler: _Publisher_ListTopicSnapshots_Handler, + }, + { + MethodName: "DeleteTopic", + Handler: _Publisher_DeleteTopic_Handler, + }, + { + MethodName: "DetachSubscription", + Handler: _Publisher_DetachSubscription_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/pubsub/v1/pubsub.proto", +} + +// SubscriberClient is the client API for Subscriber service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type SubscriberClient interface { + // Creates a subscription to a given topic. See the [resource name rules] + // (https://cloud.google.com/pubsub/docs/admin#resource_names). + // If the subscription already exists, returns `ALREADY_EXISTS`. + // If the corresponding topic doesn't exist, returns `NOT_FOUND`. + // + // If the name is not provided in the request, the server will assign a random + // name for this subscription on the same project as the topic, conforming + // to the [resource name format] + // (https://cloud.google.com/pubsub/docs/admin#resource_names). The generated + // name is populated in the returned Subscription object. Note that for REST + // API requests, you must specify a name in the request. + CreateSubscription(ctx context.Context, in *Subscription, opts ...grpc.CallOption) (*Subscription, error) + // Gets the configuration details of a subscription. 
+ GetSubscription(ctx context.Context, in *GetSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) + // Updates an existing subscription. Note that certain properties of a + // subscription, such as its topic, are not modifiable. + UpdateSubscription(ctx context.Context, in *UpdateSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) + // Lists matching subscriptions. + ListSubscriptions(ctx context.Context, in *ListSubscriptionsRequest, opts ...grpc.CallOption) (*ListSubscriptionsResponse, error) + // Deletes an existing subscription. All messages retained in the subscription + // are immediately dropped. Calls to `Pull` after deletion will return + // `NOT_FOUND`. After a subscription is deleted, a new one may be created with + // the same name, but the new one has no association with the old + // subscription or its topic unless the same topic is specified. + DeleteSubscription(ctx context.Context, in *DeleteSubscriptionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Modifies the ack deadline for a specific message. This method is useful + // to indicate that more time is needed to process a message by the + // subscriber, or to make the message available for redelivery if the + // processing was interrupted. Note that this does not modify the + // subscription-level `ackDeadlineSeconds` used for subsequent messages. + ModifyAckDeadline(ctx context.Context, in *ModifyAckDeadlineRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Acknowledges the messages associated with the `ack_ids` in the + // `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages + // from the subscription. + // + // Acknowledging a message whose ack deadline has expired may succeed, + // but such a message may be redelivered later. Acknowledging a message more + // than once will not result in an error. + Acknowledge(ctx context.Context, in *AcknowledgeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Pulls messages from the server. The server may return `UNAVAILABLE` if + // there are too many concurrent pull requests pending for the given + // subscription. + Pull(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*PullResponse, error) + // Establishes a stream with the server, which sends messages down to the + // client. The client streams acknowledgements and ack deadline modifications + // back to the server. The server will close the stream and return the status + // on any error. The server may close the stream with status `UNAVAILABLE` to + // reassign server-side resources, in which case, the client should + // re-establish the stream. Flow control can be achieved by configuring the + // underlying RPC channel. + StreamingPull(ctx context.Context, opts ...grpc.CallOption) (Subscriber_StreamingPullClient, error) + // Modifies the `PushConfig` for a specified subscription. + // + // This may be used to change a push subscription to a pull one (signified by + // an empty `PushConfig`) or vice versa, or change the endpoint URL and other + // attributes of a push subscription. Messages will accumulate for delivery + // continuously through the call regardless of changes to the `PushConfig`. + ModifyPushConfig(ctx context.Context, in *ModifyPushConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Gets the configuration details of a snapshot. Snapshots are used in + // Seek + // operations, which allow you to manage message acknowledgments in bulk. 
That + // is, you can set the acknowledgment state of messages in an existing + // subscription to the state captured by a snapshot. + GetSnapshot(ctx context.Context, in *GetSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) + // Lists the existing snapshots. Snapshots are used in [Seek]( + // https://cloud.google.com/pubsub/docs/replay-overview) operations, which + // allow you to manage message acknowledgments in bulk. That is, you can set + // the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) + // Creates a snapshot from the requested subscription. Snapshots are used in + // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, + // which allow you to manage message acknowledgments in bulk. That is, you can + // set the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + // If the snapshot already exists, returns `ALREADY_EXISTS`. + // If the requested subscription doesn't exist, returns `NOT_FOUND`. + // If the backlog in the subscription is too old -- and the resulting snapshot + // would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. + // See also the `Snapshot.expire_time` field. If the name is not provided in + // the request, the server will assign a random + // name for this snapshot on the same project as the subscription, conforming + // to the [resource name format] + // (https://cloud.google.com/pubsub/docs/admin#resource_names). The + // generated name is populated in the returned Snapshot object. Note that for + // REST API requests, you must specify a name in the request. + CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) + // Updates an existing snapshot. Snapshots are used in + // Seek + // operations, which allow + // you to manage message acknowledgments in bulk. That is, you can set the + // acknowledgment state of messages in an existing subscription to the state + // captured by a snapshot. + UpdateSnapshot(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) + // Removes an existing snapshot. Snapshots are used in [Seek] + // (https://cloud.google.com/pubsub/docs/replay-overview) operations, which + // allow you to manage message acknowledgments in bulk. That is, you can set + // the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + // When the snapshot is deleted, all messages retained in the snapshot + // are immediately dropped. After a snapshot is deleted, a new one may be + // created with the same name, but the new one has no association with the old + // snapshot or its subscription, unless the same subscription is specified. + DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) + // Seeks an existing subscription to a point in time or to a given snapshot, + // whichever is provided in the request. Snapshots are used in [Seek]( + // https://cloud.google.com/pubsub/docs/replay-overview) operations, which + // allow you to manage message acknowledgments in bulk. That is, you can set + // the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. Note that both the subscription and the + // snapshot must be on the same topic. 
+ Seek(ctx context.Context, in *SeekRequest, opts ...grpc.CallOption) (*SeekResponse, error) +} + +type subscriberClient struct { + cc grpc.ClientConnInterface +} + +func NewSubscriberClient(cc grpc.ClientConnInterface) SubscriberClient { + return &subscriberClient{cc} +} + +func (c *subscriberClient) CreateSubscription(ctx context.Context, in *Subscription, opts ...grpc.CallOption) (*Subscription, error) { + out := new(Subscription) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/CreateSubscription", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) GetSubscription(ctx context.Context, in *GetSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) { + out := new(Subscription) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/GetSubscription", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) UpdateSubscription(ctx context.Context, in *UpdateSubscriptionRequest, opts ...grpc.CallOption) (*Subscription, error) { + out := new(Subscription) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/UpdateSubscription", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) ListSubscriptions(ctx context.Context, in *ListSubscriptionsRequest, opts ...grpc.CallOption) (*ListSubscriptionsResponse, error) { + out := new(ListSubscriptionsResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ListSubscriptions", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) DeleteSubscription(ctx context.Context, in *DeleteSubscriptionRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/DeleteSubscription", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) ModifyAckDeadline(ctx context.Context, in *ModifyAckDeadlineRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ModifyAckDeadline", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) Acknowledge(ctx context.Context, in *AcknowledgeRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/Acknowledge", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) Pull(ctx context.Context, in *PullRequest, opts ...grpc.CallOption) (*PullResponse, error) { + out := new(PullResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/Pull", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) StreamingPull(ctx context.Context, opts ...grpc.CallOption) (Subscriber_StreamingPullClient, error) { + stream, err := c.cc.NewStream(ctx, &_Subscriber_serviceDesc.Streams[0], "/google.pubsub.v1.Subscriber/StreamingPull", opts...) 
+ if err != nil { + return nil, err + } + x := &subscriberStreamingPullClient{stream} + return x, nil +} + +type Subscriber_StreamingPullClient interface { + Send(*StreamingPullRequest) error + Recv() (*StreamingPullResponse, error) + grpc.ClientStream +} + +type subscriberStreamingPullClient struct { + grpc.ClientStream +} + +func (x *subscriberStreamingPullClient) Send(m *StreamingPullRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *subscriberStreamingPullClient) Recv() (*StreamingPullResponse, error) { + m := new(StreamingPullResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *subscriberClient) ModifyPushConfig(ctx context.Context, in *ModifyPushConfigRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ModifyPushConfig", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) GetSnapshot(ctx context.Context, in *GetSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) { + out := new(Snapshot) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/GetSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) ListSnapshots(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) { + out := new(ListSnapshotsResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/ListSnapshots", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) CreateSnapshot(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) { + out := new(Snapshot) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/CreateSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) UpdateSnapshot(ctx context.Context, in *UpdateSnapshotRequest, opts ...grpc.CallOption) (*Snapshot, error) { + out := new(Snapshot) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/UpdateSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) { + out := new(emptypb.Empty) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/DeleteSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *subscriberClient) Seek(ctx context.Context, in *SeekRequest, opts ...grpc.CallOption) (*SeekResponse, error) { + out := new(SeekResponse) + err := c.cc.Invoke(ctx, "/google.pubsub.v1.Subscriber/Seek", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// SubscriberServer is the server API for Subscriber service. +type SubscriberServer interface { + // Creates a subscription to a given topic. See the [resource name rules] + // (https://cloud.google.com/pubsub/docs/admin#resource_names). + // If the subscription already exists, returns `ALREADY_EXISTS`. + // If the corresponding topic doesn't exist, returns `NOT_FOUND`. + // + // If the name is not provided in the request, the server will assign a random + // name for this subscription on the same project as the topic, conforming + // to the [resource name format] + // (https://cloud.google.com/pubsub/docs/admin#resource_names). 
The generated + // name is populated in the returned Subscription object. Note that for REST + // API requests, you must specify a name in the request. + CreateSubscription(context.Context, *Subscription) (*Subscription, error) + // Gets the configuration details of a subscription. + GetSubscription(context.Context, *GetSubscriptionRequest) (*Subscription, error) + // Updates an existing subscription. Note that certain properties of a + // subscription, such as its topic, are not modifiable. + UpdateSubscription(context.Context, *UpdateSubscriptionRequest) (*Subscription, error) + // Lists matching subscriptions. + ListSubscriptions(context.Context, *ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) + // Deletes an existing subscription. All messages retained in the subscription + // are immediately dropped. Calls to `Pull` after deletion will return + // `NOT_FOUND`. After a subscription is deleted, a new one may be created with + // the same name, but the new one has no association with the old + // subscription or its topic unless the same topic is specified. + DeleteSubscription(context.Context, *DeleteSubscriptionRequest) (*emptypb.Empty, error) + // Modifies the ack deadline for a specific message. This method is useful + // to indicate that more time is needed to process a message by the + // subscriber, or to make the message available for redelivery if the + // processing was interrupted. Note that this does not modify the + // subscription-level `ackDeadlineSeconds` used for subsequent messages. + ModifyAckDeadline(context.Context, *ModifyAckDeadlineRequest) (*emptypb.Empty, error) + // Acknowledges the messages associated with the `ack_ids` in the + // `AcknowledgeRequest`. The Pub/Sub system can remove the relevant messages + // from the subscription. + // + // Acknowledging a message whose ack deadline has expired may succeed, + // but such a message may be redelivered later. Acknowledging a message more + // than once will not result in an error. + Acknowledge(context.Context, *AcknowledgeRequest) (*emptypb.Empty, error) + // Pulls messages from the server. The server may return `UNAVAILABLE` if + // there are too many concurrent pull requests pending for the given + // subscription. + Pull(context.Context, *PullRequest) (*PullResponse, error) + // Establishes a stream with the server, which sends messages down to the + // client. The client streams acknowledgements and ack deadline modifications + // back to the server. The server will close the stream and return the status + // on any error. The server may close the stream with status `UNAVAILABLE` to + // reassign server-side resources, in which case, the client should + // re-establish the stream. Flow control can be achieved by configuring the + // underlying RPC channel. + StreamingPull(Subscriber_StreamingPullServer) error + // Modifies the `PushConfig` for a specified subscription. + // + // This may be used to change a push subscription to a pull one (signified by + // an empty `PushConfig`) or vice versa, or change the endpoint URL and other + // attributes of a push subscription. Messages will accumulate for delivery + // continuously through the call regardless of changes to the `PushConfig`. + ModifyPushConfig(context.Context, *ModifyPushConfigRequest) (*emptypb.Empty, error) + // Gets the configuration details of a snapshot. Snapshots are used in + // Seek + // operations, which allow you to manage message acknowledgments in bulk. 
That + // is, you can set the acknowledgment state of messages in an existing + // subscription to the state captured by a snapshot. + GetSnapshot(context.Context, *GetSnapshotRequest) (*Snapshot, error) + // Lists the existing snapshots. Snapshots are used in [Seek]( + // https://cloud.google.com/pubsub/docs/replay-overview) operations, which + // allow you to manage message acknowledgments in bulk. That is, you can set + // the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) + // Creates a snapshot from the requested subscription. Snapshots are used in + // [Seek](https://cloud.google.com/pubsub/docs/replay-overview) operations, + // which allow you to manage message acknowledgments in bulk. That is, you can + // set the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + // If the snapshot already exists, returns `ALREADY_EXISTS`. + // If the requested subscription doesn't exist, returns `NOT_FOUND`. + // If the backlog in the subscription is too old -- and the resulting snapshot + // would expire in less than 1 hour -- then `FAILED_PRECONDITION` is returned. + // See also the `Snapshot.expire_time` field. If the name is not provided in + // the request, the server will assign a random + // name for this snapshot on the same project as the subscription, conforming + // to the [resource name format] + // (https://cloud.google.com/pubsub/docs/admin#resource_names). The + // generated name is populated in the returned Snapshot object. Note that for + // REST API requests, you must specify a name in the request. + CreateSnapshot(context.Context, *CreateSnapshotRequest) (*Snapshot, error) + // Updates an existing snapshot. Snapshots are used in + // Seek + // operations, which allow + // you to manage message acknowledgments in bulk. That is, you can set the + // acknowledgment state of messages in an existing subscription to the state + // captured by a snapshot. + UpdateSnapshot(context.Context, *UpdateSnapshotRequest) (*Snapshot, error) + // Removes an existing snapshot. Snapshots are used in [Seek] + // (https://cloud.google.com/pubsub/docs/replay-overview) operations, which + // allow you to manage message acknowledgments in bulk. That is, you can set + // the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. + // When the snapshot is deleted, all messages retained in the snapshot + // are immediately dropped. After a snapshot is deleted, a new one may be + // created with the same name, but the new one has no association with the old + // snapshot or its subscription, unless the same subscription is specified. + DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*emptypb.Empty, error) + // Seeks an existing subscription to a point in time or to a given snapshot, + // whichever is provided in the request. Snapshots are used in [Seek]( + // https://cloud.google.com/pubsub/docs/replay-overview) operations, which + // allow you to manage message acknowledgments in bulk. That is, you can set + // the acknowledgment state of messages in an existing subscription to the + // state captured by a snapshot. Note that both the subscription and the + // snapshot must be on the same topic. + Seek(context.Context, *SeekRequest) (*SeekResponse, error) +} + +// UnimplementedSubscriberServer can be embedded to have forward compatible implementations. 
+type UnimplementedSubscriberServer struct {
+}
+
+func (*UnimplementedSubscriberServer) CreateSubscription(context.Context, *Subscription) (*Subscription, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CreateSubscription not implemented")
+}
+func (*UnimplementedSubscriberServer) GetSubscription(context.Context, *GetSubscriptionRequest) (*Subscription, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetSubscription not implemented")
+}
+func (*UnimplementedSubscriberServer) UpdateSubscription(context.Context, *UpdateSubscriptionRequest) (*Subscription, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UpdateSubscription not implemented")
+}
+func (*UnimplementedSubscriberServer) ListSubscriptions(context.Context, *ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListSubscriptions not implemented")
+}
+func (*UnimplementedSubscriberServer) DeleteSubscription(context.Context, *DeleteSubscriptionRequest) (*emptypb.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteSubscription not implemented")
+}
+func (*UnimplementedSubscriberServer) ModifyAckDeadline(context.Context, *ModifyAckDeadlineRequest) (*emptypb.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ModifyAckDeadline not implemented")
+}
+func (*UnimplementedSubscriberServer) Acknowledge(context.Context, *AcknowledgeRequest) (*emptypb.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Acknowledge not implemented")
+}
+func (*UnimplementedSubscriberServer) Pull(context.Context, *PullRequest) (*PullResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Pull not implemented")
+}
+func (*UnimplementedSubscriberServer) StreamingPull(Subscriber_StreamingPullServer) error {
+	return status.Errorf(codes.Unimplemented, "method StreamingPull not implemented")
+}
+func (*UnimplementedSubscriberServer) ModifyPushConfig(context.Context, *ModifyPushConfigRequest) (*emptypb.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ModifyPushConfig not implemented")
+}
+func (*UnimplementedSubscriberServer) GetSnapshot(context.Context, *GetSnapshotRequest) (*Snapshot, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetSnapshot not implemented")
+}
+func (*UnimplementedSubscriberServer) ListSnapshots(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented")
+}
+func (*UnimplementedSubscriberServer) CreateSnapshot(context.Context, *CreateSnapshotRequest) (*Snapshot, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CreateSnapshot not implemented")
+}
+func (*UnimplementedSubscriberServer) UpdateSnapshot(context.Context, *UpdateSnapshotRequest) (*Snapshot, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UpdateSnapshot not implemented")
+}
+func (*UnimplementedSubscriberServer) DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*emptypb.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteSnapshot not implemented")
+}
+func (*UnimplementedSubscriberServer) Seek(context.Context, *SeekRequest) (*SeekResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Seek not implemented")
+}
+
+func RegisterSubscriberServer(s *grpc.Server, srv SubscriberServer) {
+	s.RegisterService(&_Subscriber_serviceDesc, srv)
+}
+
+func _Subscriber_CreateSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Subscription)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).CreateSubscription(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/CreateSubscription",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).CreateSubscription(ctx, req.(*Subscription))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_GetSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetSubscriptionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).GetSubscription(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/GetSubscription",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).GetSubscription(ctx, req.(*GetSubscriptionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_UpdateSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateSubscriptionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).UpdateSubscription(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/UpdateSubscription",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).UpdateSubscription(ctx, req.(*UpdateSubscriptionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_ListSubscriptions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListSubscriptionsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).ListSubscriptions(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/ListSubscriptions",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).ListSubscriptions(ctx, req.(*ListSubscriptionsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_DeleteSubscription_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteSubscriptionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).DeleteSubscription(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/DeleteSubscription",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).DeleteSubscription(ctx, req.(*DeleteSubscriptionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_ModifyAckDeadline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ModifyAckDeadlineRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).ModifyAckDeadline(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/ModifyAckDeadline",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).ModifyAckDeadline(ctx, req.(*ModifyAckDeadlineRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_Acknowledge_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AcknowledgeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).Acknowledge(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/Acknowledge",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).Acknowledge(ctx, req.(*AcknowledgeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_Pull_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PullRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).Pull(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/Pull",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).Pull(ctx, req.(*PullRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_StreamingPull_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(SubscriberServer).StreamingPull(&subscriberStreamingPullServer{stream})
+}
+
+type Subscriber_StreamingPullServer interface {
+	Send(*StreamingPullResponse) error
+	Recv() (*StreamingPullRequest, error)
+	grpc.ServerStream
+}
+
+type subscriberStreamingPullServer struct {
+	grpc.ServerStream
+}
+
+func (x *subscriberStreamingPullServer) Send(m *StreamingPullResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *subscriberStreamingPullServer) Recv() (*StreamingPullRequest, error) {
+	m := new(StreamingPullRequest)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func _Subscriber_ModifyPushConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ModifyPushConfigRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).ModifyPushConfig(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/ModifyPushConfig",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).ModifyPushConfig(ctx, req.(*ModifyPushConfigRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_GetSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(GetSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).GetSnapshot(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/GetSnapshot",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).GetSnapshot(ctx, req.(*GetSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListSnapshotsRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).ListSnapshots(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/ListSnapshots",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).ListSnapshots(ctx, req.(*ListSnapshotsRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_CreateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CreateSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).CreateSnapshot(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/CreateSnapshot",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).CreateSnapshot(ctx, req.(*CreateSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_UpdateSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(UpdateSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).UpdateSnapshot(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/UpdateSnapshot",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).UpdateSnapshot(ctx, req.(*UpdateSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_DeleteSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteSnapshotRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).DeleteSnapshot(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/DeleteSnapshot",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).DeleteSnapshot(ctx, req.(*DeleteSnapshotRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Subscriber_Seek_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(SeekRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(SubscriberServer).Seek(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/google.pubsub.v1.Subscriber/Seek",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(SubscriberServer).Seek(ctx, req.(*SeekRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Subscriber_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "google.pubsub.v1.Subscriber",
+	HandlerType: (*SubscriberServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "CreateSubscription",
+			Handler:    _Subscriber_CreateSubscription_Handler,
+		},
+		{
+			MethodName: "GetSubscription",
+			Handler:    _Subscriber_GetSubscription_Handler,
+		},
+		{
+			MethodName: "UpdateSubscription",
+			Handler:    _Subscriber_UpdateSubscription_Handler,
+		},
+		{
+			MethodName: "ListSubscriptions",
+			Handler:    _Subscriber_ListSubscriptions_Handler,
+		},
+		{
+			MethodName: "DeleteSubscription",
+			Handler:    _Subscriber_DeleteSubscription_Handler,
+		},
+		{
+			MethodName: "ModifyAckDeadline",
+			Handler:    _Subscriber_ModifyAckDeadline_Handler,
+		},
+		{
+			MethodName: "Acknowledge",
+			Handler:    _Subscriber_Acknowledge_Handler,
+		},
+		{
+			MethodName: "Pull",
+			Handler:    _Subscriber_Pull_Handler,
+		},
+		{
+			MethodName: "ModifyPushConfig",
+			Handler:    _Subscriber_ModifyPushConfig_Handler,
+		},
+		{
+			MethodName: "GetSnapshot",
+			Handler:    _Subscriber_GetSnapshot_Handler,
+		},
+		{
+			MethodName: "ListSnapshots",
+			Handler:    _Subscriber_ListSnapshots_Handler,
+		},
+		{
+			MethodName: "CreateSnapshot",
+			Handler:    _Subscriber_CreateSnapshot_Handler,
+		},
+		{
+			MethodName: "UpdateSnapshot",
+			Handler:    _Subscriber_UpdateSnapshot_Handler,
+		},
+		{
+			MethodName: "DeleteSnapshot",
+			Handler:    _Subscriber_DeleteSnapshot_Handler,
+		},
+		{
+			MethodName: "Seek",
+			Handler:    _Subscriber_Seek_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "StreamingPull",
+			Handler:       _Subscriber_StreamingPull_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "google/pubsub/v1/pubsub.proto",
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index c710c4b0b025..99f9bd3423d8 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -4,6 +4,7 @@ cloud.google.com/go/compute/metadata
 cloud.google.com/go/iam
 cloud.google.com/go/internal
 cloud.google.com/go/internal/optional
+cloud.google.com/go/internal/testutil
 cloud.google.com/go/internal/trace
 cloud.google.com/go/internal/version
 cloud.google.com/go/longrunning
@@ -12,6 +13,12 @@ cloud.google.com/go/longrunning/autogen
 cloud.google.com/go/bigtable
 cloud.google.com/go/bigtable/bttest
 cloud.google.com/go/bigtable/internal/option
+# cloud.google.com/go/pubsub v1.3.1
+## explicit
+cloud.google.com/go/pubsub
+cloud.google.com/go/pubsub/apiv1
+cloud.google.com/go/pubsub/internal/distribution
+cloud.google.com/go/pubsub/pstest
 # cloud.google.com/go/storage v1.10.0
 cloud.google.com/go/storage
 # github.com/Azure/azure-pipeline-go v0.2.2
@@ -1148,6 +1155,7 @@ golang.org/x/tools/internal/imports
 golang.org/x/xerrors
 golang.org/x/xerrors/internal
 # google.golang.org/api v0.35.0
+## explicit
 google.golang.org/api/cloudresourcemanager/v1
 google.golang.org/api/compute/v1
 google.golang.org/api/googleapi
@@ -1160,6 +1168,7 @@ google.golang.org/api/iterator
 google.golang.org/api/option
 google.golang.org/api/option/internaloption
 google.golang.org/api/storage/v1
+google.golang.org/api/support/bundler
 google.golang.org/api/transport/cert
 google.golang.org/api/transport/grpc
 google.golang.org/api/transport/http
@@ -1185,6 +1194,7 @@ google.golang.org/genproto/googleapis/bigtable/admin/v2
 google.golang.org/genproto/googleapis/bigtable/v2
 google.golang.org/genproto/googleapis/iam/v1
 google.golang.org/genproto/googleapis/longrunning
+google.golang.org/genproto/googleapis/pubsub/v1
 google.golang.org/genproto/googleapis/rpc/code
 google.golang.org/genproto/googleapis/rpc/status
 google.golang.org/genproto/googleapis/type/expr
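
For context on the vendoring above (this is not part of the patch): `cloud.google.com/go/pubsub/pstest` is Google's in-memory fake Pub/Sub server. It serves the generated `SubscriberServer`/`PublisherServer` surface shown in `pubsub.pb.go`, which is what lets the new gcplog target tests talk to a local gRPC endpoint instead of real GCP. The sketch below shows the usual wiring with a hypothetical project, topic, and subscription name, and error handling trimmed to `log.Fatal`; it is a minimal illustration, not code taken from this change.

package main

import (
	"context"
	"fmt"
	"log"

	"cloud.google.com/go/pubsub"
	"cloud.google.com/go/pubsub/pstest"
	"google.golang.org/api/option"
	"google.golang.org/grpc"
)

func main() {
	ctx := context.Background()

	// Start the in-memory fake Pub/Sub server vendored by this change.
	srv := pstest.NewServer()
	defer srv.Close()

	// Dial the fake over plain gRPC instead of the real Pub/Sub endpoint.
	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// The regular pubsub client is pointed at the fake via WithGRPCConn.
	client, err := pubsub.NewClient(ctx, "test-project", option.WithGRPCConn(conn))
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Create a topic and subscription, publish one message, then receive it.
	topic, err := client.CreateTopic(ctx, "test-topic")
	if err != nil {
		log.Fatal(err)
	}
	sub, err := client.CreateSubscription(ctx, "test-sub", pubsub.SubscriptionConfig{Topic: topic})
	if err != nil {
		log.Fatal(err)
	}

	res := topic.Publish(ctx, &pubsub.Message{Data: []byte("hello")})
	if _, err := res.Get(ctx); err != nil {
		log.Fatal(err)
	}

	cctx, cancel := context.WithCancel(ctx)
	err = sub.Receive(cctx, func(ctx context.Context, m *pubsub.Message) {
		fmt.Printf("got message: %s\n", m.Data)
		m.Ack()
		cancel() // stop receiving after the first message
	})
	if err != nil {
		log.Fatal(err)
	}
}

Because the fake registers the same generated service descriptor (`_Subscriber_serviceDesc` above), traffic from the client exercises the identical gRPC surface the real service exposes.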