From 6d5408c100e464e807cc3b1ad48be512c96167e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20=C5=A0tibran=C3=BD?= Date: Tue, 14 Apr 2020 16:19:56 +0200 Subject: [PATCH] Update Cortex to latest master (#1869) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update Cortex to master Signed-off-by: Peter Štibraný * Fix compilation errors after Cortex update. Signed-off-by: Peter Štibraný * Update to Cortex v1.0.0-rc.0 Signed-off-by: Peter Štibraný * Added missing vendored file. Signed-off-by: Peter Štibraný * Vendor Cortex 1.0.0 Signed-off-by: Peter Štibraný * Configuration changes. Signed-off-by: Peter Štibraný * Mention Cortex 1.0 changes and link to docs. Signed-off-by: Peter Štibraný * Fix jsonnet files, and mention fields that were removed. Signed-off-by: Peter Štibraný --- docs/clients/promtail/configuration.md | 6 +- docs/clients/promtail/troubleshooting.md | 12 +- docs/configuration/README.md | 55 +- docs/configuration/examples.md | 8 +- docs/configuration/query-frontend.md | 4 +- docs/operations/storage/table-manager.md | 2 +- docs/operations/upgrade.md | 10 + go.mod | 7 +- go.sum | 38 +- pkg/loki/modules.go | 4 +- pkg/storage/util_test.go | 8 +- production/helm/loki/values.yaml | 4 +- production/helm/promtail/values.yaml | 6 +- production/ksonnet/loki/config.libsonnet | 23 +- .../service/applicationautoscaling/api.go | 5048 ----------------- .../applicationautoscalingiface/interface.go | 116 - .../service/applicationautoscaling/doc.go | 73 - .../service/applicationautoscaling/errors.go | 60 - .../service/applicationautoscaling/service.go | 103 - .../cortex/pkg/chunk/aws/aws_autoscaling.go | 226 - .../pkg/chunk/aws/dynamodb_storage_client.go | 22 +- .../pkg/chunk/aws/dynamodb_table_client.go | 7 - .../cortex/pkg/chunk/aws/fixtures.go | 3 +- .../pkg/chunk/aws/metrics_autoscaling.go | 26 +- .../cortex/pkg/chunk/aws/s3_storage_client.go | 6 +- .../pkg/chunk/azure/blob_storage_client.go | 37 +- .../cortex/pkg/chunk/cache/background.go | 8 +- .../cortex/pkg/chunk/cache/cache.go | 24 +- .../cortex/pkg/chunk/cache/fifo_cache.go | 8 +- .../cortex/pkg/chunk/cache/memcached.go | 12 +- .../pkg/chunk/cache/memcached_client.go | 79 +- .../cortex/pkg/chunk/cache/mock.go | 7 +- .../cortex/pkg/chunk/cache/redis_cache.go | 11 +- .../pkg/chunk/cassandra/storage_client.go | 33 +- .../cortex/pkg/chunk/chunk_store.go | 24 +- .../cortex/pkg/chunk/chunk_store_utils.go | 10 +- .../cortex/pkg/chunk/composite_store.go | 8 +- .../cortex/pkg/chunk/fixtures.go | 6 +- .../pkg/chunk/gcp/bigtable_index_client.go | 4 +- .../cortex/pkg/chunk/gcp/fixtures.go | 2 +- .../cortex/pkg/chunk/gcp/gcs_object_client.go | 22 +- .../cortex/pkg/chunk/purger/purger.go | 2 + .../cortex/pkg/chunk/schema_config.go | 49 +- .../cortex/pkg/chunk/series_store.go | 9 +- .../cortex/pkg/chunk/storage/factory.go | 34 +- .../cortex/pkg/chunk/table_manager.go | 40 +- .../cortex/pkg/chunk/testutils/testutils.go | 3 +- .../cortex/pkg/distributor/distributor.go | 14 +- .../pkg/distributor/distributor_ring.go | 6 +- .../cortex/pkg/distributor/ha_tracker.go | 2 +- .../cortex/pkg/ingester/client/pool.go | 6 +- .../pkg/querier/astmapper/shard_summer.go | 26 +- .../cortex/pkg/querier/frontend/frontend.go | 2 +- .../cortex/pkg/querier/frontend/worker.go | 8 +- .../pkg/querier/queryrange/querysharding.go | 12 +- .../cortexproject/cortex/pkg/ring/http.go | 11 +- .../cortex/pkg/ring/kv/client.go | 10 +- .../cortex/pkg/ring/kv/consul/client.go | 18 +- .../cortex/pkg/ring/kv/etcd/etcd.go | 17 + 
.../ring/kv/memberlist/memberlist_client.go | 4 +- .../cortex/pkg/ring/lifecycler.go | 31 +- .../cortexproject/cortex/pkg/ring/model.go | 6 +- .../cortexproject/cortex/pkg/ring/ring.go | 18 +- .../cortexproject/cortex/pkg/ring/ring.pb.go | 110 +- .../cortexproject/cortex/pkg/ring/ring.proto | 1 + .../cortexproject/cortex/pkg/util/backoff.go | 6 +- .../cortex/pkg/util/experimental.go | 21 + .../cortex/pkg/util/flagext/deprecated.go | 11 + .../cortex/pkg/util/metrics_helper.go | 33 +- .../cortexproject/cortex/pkg/util/strings.go | 12 + .../cortex/pkg/util/validation/validate.go | 4 + vendor/github.com/pkg/errors/.travis.yml | 11 +- vendor/github.com/pkg/errors/Makefile | 44 + vendor/github.com/pkg/errors/README.md | 11 +- vendor/github.com/pkg/errors/errors.go | 8 +- vendor/github.com/pkg/errors/go113.go | 38 + vendor/github.com/pkg/errors/stack.go | 58 +- vendor/github.com/thanos-io/thanos/LICENSE | 201 + .../pkg/discovery/dns/miekgdns/lookup.go | 152 + .../pkg/discovery/dns/miekgdns/resolver.go | 74 + .../thanos/pkg/discovery/dns/provider.go | 152 + .../thanos/pkg/discovery/dns/resolver.go | 117 + vendor/golang.org/x/mod/LICENSE | 27 + vendor/golang.org/x/mod/PATENTS | 22 + .../{tools/internal => mod}/module/module.go | 394 +- .../{tools/internal => mod}/semver/semver.go | 4 +- .../x/tools/cmd/goimports/goimports.go | 10 +- vendor/golang.org/x/tools/go/analysis/doc.go | 77 +- .../x/tools/go/ast/astutil/imports.go | 5 +- .../x/tools/go/ast/inspector/inspector.go | 4 +- .../go/internal/gcimporter/gcimporter.go | 8 +- .../tools/go/internal/packagesdriver/sizes.go | 102 +- vendor/golang.org/x/tools/go/packages/doc.go | 3 +- .../x/tools/go/packages/external.go | 7 +- .../golang.org/x/tools/go/packages/golist.go | 755 +-- .../x/tools/go/packages/golist_overlay.go | 201 +- .../x/tools/go/packages/loadmode_string.go | 57 + .../x/tools/go/packages/packages.go | 44 +- .../x/tools/internal/fastwalk/fastwalk.go | 10 +- .../internal/fastwalk/fastwalk_portable.go | 2 +- .../tools/internal/fastwalk/fastwalk_unix.go | 7 +- .../x/tools/internal/gocommand/invoke.go | 121 + .../x/tools/internal/gopathwalk/walk.go | 43 +- .../x/tools/internal/imports/fix.go | 661 ++- .../x/tools/internal/imports/imports.go | 59 +- .../x/tools/internal/imports/mod.go | 287 +- .../x/tools/internal/imports/mod_cache.go | 95 +- .../internal/packagesinternal/packages.go | 27 + .../golang.org/x/tools/internal/span/parse.go | 100 - .../golang.org/x/tools/internal/span/span.go | 285 - .../golang.org/x/tools/internal/span/token.go | 151 - .../x/tools/internal/span/token111.go | 39 - .../x/tools/internal/span/token112.go | 16 - .../golang.org/x/tools/internal/span/uri.go | 152 - .../golang.org/x/tools/internal/span/utf16.go | 94 - vendor/golang.org/x/xerrors/LICENSE | 27 + vendor/golang.org/x/xerrors/PATENTS | 22 + vendor/golang.org/x/xerrors/README | 2 + vendor/golang.org/x/xerrors/adaptor.go | 193 + vendor/golang.org/x/xerrors/codereview.cfg | 1 + vendor/golang.org/x/xerrors/doc.go | 22 + vendor/golang.org/x/xerrors/errors.go | 33 + vendor/golang.org/x/xerrors/fmt.go | 187 + vendor/golang.org/x/xerrors/format.go | 34 + vendor/golang.org/x/xerrors/frame.go | 56 + vendor/golang.org/x/xerrors/go.mod | 3 + .../golang.org/x/xerrors/internal/internal.go | 8 + vendor/golang.org/x/xerrors/wrap.go | 106 + vendor/modules.txt | 24 +- 129 files changed, 3931 insertions(+), 8220 deletions(-) delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go delete mode 100644 
vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/errors.go delete mode 100644 vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/aws/aws_autoscaling.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/experimental.go create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/strings.go create mode 100644 vendor/github.com/pkg/errors/Makefile create mode 100644 vendor/github.com/pkg/errors/go113.go create mode 100644 vendor/github.com/thanos-io/thanos/LICENSE create mode 100644 vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/resolver.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go create mode 100644 vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go create mode 100644 vendor/golang.org/x/mod/LICENSE create mode 100644 vendor/golang.org/x/mod/PATENTS rename vendor/golang.org/x/{tools/internal => mod}/module/module.go (57%) rename vendor/golang.org/x/{tools/internal => mod}/semver/semver.go (99%) create mode 100644 vendor/golang.org/x/tools/go/packages/loadmode_string.go create mode 100644 vendor/golang.org/x/tools/internal/gocommand/invoke.go create mode 100644 vendor/golang.org/x/tools/internal/packagesinternal/packages.go delete mode 100644 vendor/golang.org/x/tools/internal/span/parse.go delete mode 100644 vendor/golang.org/x/tools/internal/span/span.go delete mode 100644 vendor/golang.org/x/tools/internal/span/token.go delete mode 100644 vendor/golang.org/x/tools/internal/span/token111.go delete mode 100644 vendor/golang.org/x/tools/internal/span/token112.go delete mode 100644 vendor/golang.org/x/tools/internal/span/uri.go delete mode 100644 vendor/golang.org/x/tools/internal/span/utf16.go create mode 100644 vendor/golang.org/x/xerrors/LICENSE create mode 100644 vendor/golang.org/x/xerrors/PATENTS create mode 100644 vendor/golang.org/x/xerrors/README create mode 100644 vendor/golang.org/x/xerrors/adaptor.go create mode 100644 vendor/golang.org/x/xerrors/codereview.cfg create mode 100644 vendor/golang.org/x/xerrors/doc.go create mode 100644 vendor/golang.org/x/xerrors/errors.go create mode 100644 vendor/golang.org/x/xerrors/fmt.go create mode 100644 vendor/golang.org/x/xerrors/format.go create mode 100644 vendor/golang.org/x/xerrors/frame.go create mode 100644 vendor/golang.org/x/xerrors/go.mod create mode 100644 vendor/golang.org/x/xerrors/internal/internal.go create mode 100644 vendor/golang.org/x/xerrors/wrap.go diff --git a/docs/clients/promtail/configuration.md b/docs/clients/promtail/configuration.md index 238f73c28972f..b96dbae7f56cf 100644 --- a/docs/clients/promtail/configuration.md +++ b/docs/clients/promtail/configuration.md @@ -211,13 +211,13 @@ tls_config: # For a total time of 511.5s(8.5m) before logs are lost backoff_config: # Initial backoff time between retries - [minbackoff: | default = 500ms] + [min_period: | default = 500ms] # Maximum backoff time between retries - [maxbackoff: | default = 5m] + [max_period: | default = 5m] # Maximum number of retries to do - [maxretries: | default = 10] + [max_retries: | default = 10] # Static labels to add to all logs being sent to Loki. 
# Use map like {"foo": "bar"} to add a label foo with diff --git a/docs/clients/promtail/troubleshooting.md b/docs/clients/promtail/troubleshooting.md index ab0fbc79848b8..ff8525d1509d8 100644 --- a/docs/clients/promtail/troubleshooting.md +++ b/docs/clients/promtail/troubleshooting.md @@ -102,24 +102,24 @@ batched together before getting pushed to Loki, based on the max batch duration In case of any error while sending a log entries batch, `promtail` adopts a "retry then discard" strategy: -- `promtail` retries to send log entry to the ingester up to `maxretries` times +- `promtail` retries to send log entry to the ingester up to `max_retries` times - If all retries fail, `promtail` discards the batch of log entries (_which will be lost_) and proceeds with the next one -You can configure the `maxretries` and the delay between two retries via the +You can configure the `max_retries` and the delay between two retries via the `backoff_config` in the promtail config file: ```yaml clients: - url: INGESTER-URL backoff_config: - minbackoff: 100ms - maxbackoff: 10s - maxretries: 10 + min_period: 100ms + max_period: 10s + max_retries: 10 ``` The following table shows an example of the total delay applied by the backoff algorithm -with `minbackoff: 100ms` and `maxbackoff: 10s`: +with `min_period: 100ms` and `max_period: 10s`: | Retry | Min delay | Max delay | Total min delay | Total max delay | | ----- | --------- | --------- | --------------- | --------------- | diff --git a/docs/configuration/README.md b/docs/configuration/README.md index 71f39aea256b9..0c7d9e7eb2718 100644 --- a/docs/configuration/README.md +++ b/docs/configuration/README.md @@ -243,13 +243,13 @@ The `grpc_client_config` block configures a client connection to a gRPC service. # Configures backoff when enbaled. backoff_config: # Minimum delay when backing off. - [minbackoff: | default = 100ms] + [min_period: | default = 100ms] # The maximum delay when backing off. - [maxbackoff: | default = 10s] + [max_period: | default = 10s] # Number of times to backoff and retry before failing. - [maxretries: | default = 10] + [max_retries: | default = 10] ``` ## ingester_config @@ -344,9 +344,6 @@ ring. # conditions with ingesters exiting and updating the ring. [min_ready_duration: | default = 1m] -# Store tokens in a normalised fashion to reduce the number of allocations. -[normalise_tokens: | default = false] - # Name of network interfaces to read addresses from. interface_names: - [ ... | default = ["eth0", "en0"]] @@ -375,13 +372,13 @@ kvstore: [host: | duration = "localhost:8500"] # The ACL Token used to interact with Consul. - [acltoken: ] + [acl_token: ] # The HTTP timeout when communicating with Consul - [httpclienttimeout: | default = 20s] + [http_client_timeout: | default = 20s] # Whether or not consistent reads to Consul are enabled. - [consistentreads: | default = true] + [consistent_reads: | default = true] # Configuration for an ETCD v3 client. Only applies if # store is "etcd" @@ -424,21 +421,17 @@ aws: [s3forcepathstyle: | default = false] # Configure the DynamoDB connection - dynamodbconfig: + dynamodb: # URL for DynamoDB with escaped Key and Secret encoded. If only region is specified as a # host, the proper endpoint will be deduced. Use inmemory:/// to # use a mock in-memory implementation. - dynamodb: + dynamodb_url: # DynamoDB table management requests per-second limit. - [apilimit: | default = 2.0] + [api_limit: | default = 2.0] # DynamoDB rate cap to back off when throttled. 
- [throttlelimit: | default = 10.0] - - # Application Autoscaling endpoint URL with escaped Key and Secret - # encoded. - [applicationautoscaling: ] + [throttle_limit: | default = 10.0] # Metrics-based autoscaling configuration. metrics: @@ -446,34 +439,34 @@ aws: [url: ] # Queue length above which we will scale up capacity. - [targetqueuelen: | default = 100000] + [target_queue_length: | default = 100000] # Scale up capacity by this multiple - [scaleupfactor: | default = 1.3] + [scale_up_factor: | default = 1.3] # Ignore throttling below this level (rate per second) - [minthrottling: | default = 1] + [ignore_throttle_below: | default = 1] # Query to fetch ingester queue length - [queuelengthquery: | default = "sum(avg_over_time(cortex_ingester_flush_queue_length{job="cortex/ingester"}[2m]))"] + [queue_length_query: | default = "sum(avg_over_time(cortex_ingester_flush_queue_length{job="cortex/ingester"}[2m]))"] # Query to fetch throttle rates per table - [throttlequery: | default = "sum(rate(cortex_dynamo_throttled_total{operation="DynamoDB.BatchWriteItem"}[1m])) by (table) > 0"] + [write_throttle_query: | default = "sum(rate(cortex_dynamo_throttled_total{operation="DynamoDB.BatchWriteItem"}[1m])) by (table) > 0"] # Query to fetch write capacity usage per table - [usagequery: | default = "sum(rate(cortex_dynamo_consumed_capacity_total{operation="DynamoDB.BatchWriteItem"}[15m])) by (table) > 0"] + [write_usage_query: | default = "sum(rate(cortex_dynamo_consumed_capacity_total{operation="DynamoDB.BatchWriteItem"}[15m])) by (table) > 0"] # Query to fetch read capacity usage per table - [readusagequery: | default = "sum(rate(cortex_dynamo_consumed_capacity_total{operation="DynamoDB.QueryPages"}[1h])) by (table) > 0"] + [read_usage_query: | default = "sum(rate(cortex_dynamo_consumed_capacity_total{operation="DynamoDB.QueryPages"}[1h])) by (table) > 0"] # Query to fetch read errors per table - [readerrorquery: | default = "sum(increase(cortex_dynamo_failures_total{operation="DynamoDB.QueryPages",error="ProvisionedThroughputExceededException"}[1m])) by (table) > 0"] + [read_error_query: | default = "sum(increase(cortex_dynamo_failures_total{operation="DynamoDB.QueryPages",error="ProvisionedThroughputExceededException"}[1m])) by (table) > 0"] # Number of chunks to group together to parallelise fetches (0 to disable) - [chunkgangsize: | default = 10] + [chunk_gang_size: | default = 10] # Max number of chunk get operations to start in parallel. - [chunkgetmaxparallelism: | default = 32] + [chunk_get_max_parallelism: | default = 32] # Configures storing chunks in Bigtable. Required fields only required # when bigtable is defined in config. @@ -560,7 +553,7 @@ filesystem: # Cache validity for active index entries. Should be no higher than # the chunk_idle_period in the ingester settings. -[indexcachevalidity: | default = 5m] +[index_cache_validity: | default = 5m] # The maximum number of chunks to fetch per batch. [max_chunk_batch_size: | default = 50] @@ -900,7 +893,7 @@ and how to provision tables when DynamoDB is used as the backing store. [retention_period: | default = 0s] # Period with which the table manager will poll for tables. -[dynamodb_poll_interval: | default = 2m] +[poll_interval: | default = 2m] # Duration a table will be created before it is needed. [creation_grace_period: | default = 10m] @@ -919,7 +912,7 @@ The `provision_config` block configures provisioning capacity for DynamoDB. ```yaml # Enables on-demand throughput provisioning for the storage # provider, if supported.
Applies only to tables which are not autoscaled. -[provisioned_throughput_on_demand_mode: | default = false] +[enable_ondemand_throughput_mode: | default = false] # DynamoDB table default write throughput. [provisioned_write_throughput: | default = 3000] @@ -929,7 +922,7 @@ The `provision_config` block configures provisioning capacity for DynamoDB. # Enables on-demand throughput provisioning for the storage provider, # if supported. Applies only to tables which are not autoscaled. -[inactive_throughput_on_demand_mode: | default = false] +[enable_inactive_throughput_on_demand_mode: | default = false] # DynamoDB table write throughput for inactive tables. [inactive_write_throughput: | default = 1] diff --git a/docs/configuration/examples.md b/docs/configuration/examples.md index 7084c1fe45e7e..449c7062f3161 100644 --- a/docs/configuration/examples.md +++ b/docs/configuration/examples.md @@ -135,8 +135,8 @@ schema_config: storage_config: aws: s3: s3://access_key:secret_access_key@region/bucket_name - dynamodbconfig: - dynamodb: dynamodb://access_key:secret_access_key@region + dynamodb: + dynamodb_url: dynamodb://access_key:secret_access_key@region ``` If you don't wish to hard-code S3 credentials, you can also configure an EC2 instance role by changing the `storage_config` section: ```yaml storage_config: aws: s3: s3://region/bucket_name - dynamodbconfig: - dynamodb: dynamodb://region + dynamodb: + dynamodb_url: dynamodb://region ``` ### S3-compatible APIs diff --git a/docs/configuration/query-frontend.md b/docs/configuration/query-frontend.md index e8bb3287d718f..2fcd2e8b1188b 100644 --- a/docs/configuration/query-frontend.md +++ b/docs/configuration/query-frontend.md @@ -55,7 +55,7 @@ data: frontend: log_queries_longer_than: 5s - downstream: querier..svc.cluster.local:3100 + downstream_url: querier..svc.cluster.local:3100 compress_responses: true ``` @@ -140,5 +140,5 @@ Once you've deployed these, you'll need your grafana datasource to point to the the query frontend operates in one of two fashions: -1) with `--frontend.downstream-url` or its yaml equivalent `frontend.downstream`. This simply proxies requests over http to said url. +1) with `--frontend.downstream-url` or its yaml equivalent `frontend.downstream_url`. This simply proxies requests over http to said url. 2) without (1) it defaults to a pull service. In this form, the frontend instantiates per-tenant queues that downstream queriers pull queries from via grpc. When operating in this mode, queriers need to specify `-querier.frontend-address` or its yaml equivalent `frontend_worker.frontend_address`. diff --git a/docs/operations/storage/table-manager.md b/docs/operations/storage/table-manager.md index a77460fbb6e85..1557c6c976a57 100644 --- a/docs/operations/storage/table-manager.md +++ b/docs/operations/storage/table-manager.md @@ -155,7 +155,7 @@ read/write capacity units and autoscaling.
| DynamoDB | Active table | Inactive table | | ------------------- | --------------------------------------- | ------------------------------------ | -| Capacity mode | `provisioned_throughput_on_demand_mode` | `inactive_throughput_on_demand_mode` | +| Capacity mode | `enable_ondemand_throughput_mode` | `enable_inactive_throughput_on_demand_mode` | | Read capacity unit | `provisioned_read_throughput` | `inactive_read_throughput` | | Write capacity unit | `provisioned_write_throughput` | `inactive_write_throughput` | | Autoscaling | Enabled (if configured) | Always disabled | diff --git a/docs/operations/upgrade.md b/docs/operations/upgrade.md index 9e98838511810..9cf4deb10a218 100644 --- a/docs/operations/upgrade.md +++ b/docs/operations/upgrade.md @@ -6,6 +6,16 @@ Unfortunately Loki is software and software is hard and sometimes things are not On this page we will document any upgrade issues/gotchas/considerations we are aware of. +## 1.5.0 + +Loki 1.5.0 vendors Cortex v1.0.0 (congratulations!), which has a [massive list of changes](https://cortexmetrics.io/docs/changelog/#1-0-0-2020-04-02). + +While the command-line flag changes affect Loki as well, we usually recommend using the configuration file instead. + +Cortex has done a lot of cleanup in its configuration file, and you are strongly urged to review the [annotated config file diff](https://cortexmetrics.io/docs/changelog/#config-file-breaking-changes) before upgrading to Loki 1.5.0. + +The following fields were removed from the YAML configuration entirely: `claim_on_rollout` (always true), `normalise_tokens` (always true). A before/after sketch of some renamed fields is shown after the 1.4.0 note below. + ## 1.4.0 Loki 1.4.0 vendors Cortex v0.7.0-rc.0 which contains [several breaking config changes](https://github.com/cortexproject/cortex/blob/v0.7.0-rc.0/CHANGELOG.md).
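To make the 1.5.0 renames concrete, here is a minimal before/after sketch using only field names that appear in this patch; the values are placeholders, the `consul` block sits under the ring `kvstore` configuration, and the `aws` block sits under `storage_config`:

```yaml
# Before (Loki 1.4.x field names)
backoff_config:
  minbackoff: 100ms
  maxbackoff: 10s
  maxretries: 10
consul:
  httpclienttimeout: 20s
  consistentreads: true
aws:
  dynamodbconfig:
    dynamodb: dynamodb://region
---
# After (Loki 1.5.0 / Cortex 1.0 field names)
backoff_config:
  min_period: 100ms
  max_period: 10s
  max_retries: 10
consul:
  http_client_timeout: 20s
  consistent_reads: true
aws:
  dynamodb:
    dynamodb_url: dynamodb://region
```

The removed fields (`claim_on_rollout`, `normalise_tokens`) have no replacement; the behavior they enabled is now always on, so simply delete them.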
diff --git a/go.mod b/go.mod index 351219fb06cbd..edc3abaabdc18 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/containerd/containerd v1.3.2 // indirect github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448 // indirect github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e - github.com/cortexproject/cortex v0.7.1-0.20200316184320-acc42abdf56c + github.com/cortexproject/cortex v1.0.0 github.com/davecgh/go-spew v1.1.1 github.com/docker/distribution v2.7.1+incompatible // indirect github.com/docker/docker v0.7.3-0.20190817195342-4760db040282 @@ -39,7 +39,7 @@ require ( github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f github.com/opentracing/opentracing-go v1.1.1-0.20200124165624-2876d2018785 github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible - github.com/pkg/errors v0.8.1 + github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.5.0 github.com/prometheus/client_model v0.2.0 github.com/prometheus/common v0.9.1 @@ -47,12 +47,13 @@ require ( github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd github.com/stretchr/testify v1.5.1 + github.com/thanos-io/thanos v0.11.0 // indirect github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 github.com/uber/jaeger-client-go v2.20.1+incompatible github.com/ugorji/go v1.1.7 // indirect github.com/weaveworks/common v0.0.0-20200310113808-2708ba4e60a4 go.etcd.io/etcd v0.0.0-20190815204525-8f85f0dc2607 // indirect - golang.org/x/net v0.0.0-20191112182307-2180aed22343 + golang.org/x/net v0.0.0-20200226121028-0de0cce0169b google.golang.org/grpc v1.25.1 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/fsnotify.v1 v1.4.7 diff --git a/go.sum b/go.sum index 857119ed10428..ca8157e358386 100644 --- a/go.sum +++ b/go.sum @@ -60,6 +60,7 @@ github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbt github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/Masterminds/squirrel v0.0.0-20161115235646-20f192218cf5/go.mod h1:xnKTFzjGUiZtiOagBsfnvomW+nJg2usB1ZpordQWqNM= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= @@ -70,6 +71,7 @@ github.com/NYTimes/gziphandler v1.1.1 h1:ZUDjpQae29j0ryrS0u/B8HZfJBtBQHjqw2rQ2cq github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c= github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/OneOfOne/xxhash v1.2.5/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/OneOfOne/xxhash v1.2.6 h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA= github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -93,6 +95,7 @@ github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb github.com/armon/circbuf 
v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= +github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUqU= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -102,6 +105,7 @@ github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:l github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= +github.com/aws/aws-sdk-go v1.22.4/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.48 h1:J82DYDGZHOKHdhx6hD24Tm30c2C3GchYGfN0mf9iKUk= github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= @@ -158,8 +162,9 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cortexproject/cortex v0.7.1-0.20200316184320-acc42abdf56c h1:WQUYYiNH49fS9bZXLbikGO0eexb8dx8W5rIe/iCiKNs= -github.com/cortexproject/cortex v0.7.1-0.20200316184320-acc42abdf56c/go.mod h1:dMuT8RuWexf371937IhTj7/Ha3P/+Aog3pddNtV6Jo0= +github.com/cortexproject/cortex v0.6.1-0.20200228110116-92ab6cbe0995/go.mod h1:3Xa3DjJxtpXqxcMGdk850lcIRb81M0fyY1MQ6udY134= +github.com/cortexproject/cortex v1.0.0 h1:SbvD/LBbp50bQBq+lMwYoS91I6DUMbRKaYxE6UmSEa0= +github.com/cortexproject/cortex v1.0.0/go.mod h1:KixgGK5GO7YVo48k37rvHOEQlwpDCqHSPX2Mv2IuJMY= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8= github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg= @@ -273,6 +278,7 @@ github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.18.0/go.mod h1:uI6pHuxWYTy94zZxgcwJkUWa9wbIlhteGfloI10GD4U= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= +github.com/go-openapi/runtime v0.19.3/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec 
v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= @@ -289,6 +295,7 @@ github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/ github.com/go-openapi/swag v0.17.2/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/validate v0.17.2/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= @@ -372,6 +379,7 @@ github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+ github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gnostic v0.0.0-20170426233943-68f4ded48ba9/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/googleapis/gnostic v0.3.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk= github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/gophercloud/gophercloud v0.0.0-20190126172459-c818fa66e4c8/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= @@ -459,6 +467,7 @@ github.com/hashicorp/memberlist v0.1.5 h1:AYBsgJOW9gab/toO5tEB8lWetVgDKZycqkebJ8 github.com/hashicorp/memberlist v0.1.5/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2 h1:YZ7UKsJv+hKjqGVUUbtE3HNj79Eln2oQ75tniF6iPt0= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= +github.com/hashicorp/serf v0.8.3/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= github.com/hashicorp/serf v0.8.5 h1:ZynDUIQiA8usmRgPdGPHFdPnb1wgGI9tK3mO9hcAJjc= github.com/hashicorp/serf v0.8.5/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -476,6 +485,7 @@ github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= @@ -552,6 +562,7 @@ github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.22 h1:Jm64b3bO9kP43ddLjL2EY3Io6bmy1qGb9Xxz6TqS6rc= github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= +github.com/minio/minio-go/v6 v6.0.44/go.mod 
h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= github.com/minio/minio-go/v6 v6.0.49/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -636,17 +647,21 @@ github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible/go.mod github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/alertmanager v0.18.0/go.mod h1:WcxHBl40VSPuOaqWae6l6HpnEOVRIycEJ7i9iYkadEE= +github.com/prometheus/alertmanager v0.19.0/go.mod h1:Eyp94Yi/T+kdeb2qvq66E3RGuph5T/jm/RBVh4yz1xo= github.com/prometheus/alertmanager v0.20.0/go.mod h1:9g2i48FAyZW6BtbsnvHtMHQXl2aVtrORKwKVCQ+nbrg= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= github.com/prometheus/client_golang v1.2.0/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.2.1 h1:JnMpQc6ppsNgw9QPAGF6Dod479itz7lvlsMzzNayLOI= github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= @@ -664,6 +679,7 @@ github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7q github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/common v0.8.0/go.mod h1:PC/OgXc+UN7B4ALwvn1yzVZmVwvhXp5JsbBv6wSv6i0= @@ -683,6 +699,8 @@ github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/prometheus v0.0.0-20180315085919-58e2a31db8de/go.mod 
h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/prometheus/prometheus v0.0.0-20190818123050-43acd0e2e93f/go.mod h1:rMTlmxGCvukf2KMu3fClMDKLLoJ5hl61MhcJ7xKakf0= +github.com/prometheus/prometheus v1.8.2-0.20200107122003-4708915ac6ef/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= github.com/prometheus/prometheus v1.8.2-0.20200110114423-1e64d757f711/go.mod h1:7U90zPoLkWjEIQcy/rweQla82OCTUzxVHE51G3OhJbI= github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33 h1:HBYrMJj5iosUjUkAK9L5GO+5eEQXbcrzdjkqY9HV5W4= github.com/prometheus/prometheus v1.8.2-0.20200213233353-b90be6f32a33/go.mod h1:fkIPPkuZnkXyopYHmXPxf9rgiPkVgZCN8w9o8+UgBlY= @@ -748,6 +766,9 @@ github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJy github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/thanos-io/thanos v0.8.1-0.20200109203923-552ffa4c1a0d/go.mod h1:usT/TxtJQ7DzinTt+G9kinDQmRS5sxwu0unVKZ9vdcw= +github.com/thanos-io/thanos v0.8.1-0.20200326105947-214ff4480e93/go.mod h1:PeLHoE5XdPZss/3eLvuxDCFXnM6Sd2Kh+saQIRJVtBE= +github.com/thanos-io/thanos v0.11.0 h1:UkWLa93sihcxCofelRH/NBGQxFyFU73eXIr2a+dwOFM= github.com/thanos-io/thanos v0.11.0/go.mod h1:N/Yes7J68KqvmY+xM6J5CJqEvWIvKSR5sqGtmuD6wDc= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -770,6 +791,7 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= +github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a/go.mod h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw= github.com/weaveworks/common v0.0.0-20200310113808-2708ba4e60a4 h1:H1CjeKf1q/bL7OBvb6KZclHsvnGRGr0Tsuy6y5rtFEc= github.com/weaveworks/common v0.0.0-20200310113808-2708ba4e60a4/go.mod h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= @@ -791,6 +813,7 @@ go.etcd.io/etcd v0.0.0-20190709142735-eb7dd97135a5/go.mod h1:N0RPWo9FXJYZQI4BTkD go.etcd.io/etcd v0.0.0-20190815204525-8f85f0dc2607 h1:TA51XPJi/dOGnzp82lfN1wh8ijEz3BZEiKphiurSzLU= go.etcd.io/etcd v0.0.0-20190815204525-8f85f0dc2607/go.mod h1:tQYIqsNuGzkF9ncfEtoEX0qkoBhzw6ih5N1xcdGnvek= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.mongodb.org/mongo-driver v1.0.4/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= @@ -823,7 +846,9 @@ golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto 
v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708 h1:pXVtWnwHkrWD9ru3sDxY/qFK/bfc0egRovX91EjWjf4= golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -847,6 +872,8 @@ golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.2.0 h1:KU7oHjnv3XNWfa5COkzUifxZmxp1TyI7ImMXqFxLwvQ= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190923162816-aa69164e4478 h1:l5EDrHhldLYb3ZRHDUhXF7Om7MvYXnkV9/iQNo1lX6g= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -887,6 +914,7 @@ golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a h1:aYOabOQFp6Vj6W1F80affTUvO9UxmJRx8K0gsfABByQ= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456 h1:ng0gs1AKnRRuEMZoTLLlbOd+C17zUDepwGQBb/n+JVg= @@ -946,7 +974,11 @@ golang.org/x/tools v0.0.0-20191111182352-50fa39b762bc/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 h1:EtTFh6h4SAKemS+CURDMTDIANuduG5zKEXShyy18bGA= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200306191617-51e69f71924f h1:bFIWQKTZ5vXyr7xMDvzbWUj5Y/WBE4a4sf35MAyZjx0= +golang.org/x/tools v0.0.0-20200306191617-51e69f71924f/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= golang.org/x/xerrors 
v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= @@ -1061,9 +1093,11 @@ k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= +k8s.io/kube-openapi v0.0.0-20190722073852-5e22f3d471e6/go.mod h1:RZvgC8MSN6DjiMV6oIfEE9pDL9CYXokkfaCKZeHm3nc= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a h1:UcxjrRMyNx/i/y8G7kPvLyy7rfbeuf1PYyBf973pgyU= k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0= +k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6 h1:p0Ai3qVtkbCG/Af26dBmU0E1W58NID3hSSh7cMyylpM= k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= diff --git a/pkg/loki/modules.go b/pkg/loki/modules.go index 86f00005e53d4..de6717ae66428 100644 --- a/pkg/loki/modules.go +++ b/pkg/loki/modules.go @@ -238,8 +238,8 @@ func (t *Loki) initTableManager() error { t.cfg.TableManager.IndexTables.ReadScale.Enabled || t.cfg.TableManager.ChunkTables.InactiveReadScale.Enabled || t.cfg.TableManager.IndexTables.InactiveReadScale.Enabled) && - (t.cfg.StorageConfig.AWSStorageConfig.ApplicationAutoScaling.URL == nil && t.cfg.StorageConfig.AWSStorageConfig.Metrics.URL == "") { - level.Error(util.Logger).Log("msg", "WriteScale is enabled but no ApplicationAutoScaling or Metrics URL has been provided") + t.cfg.StorageConfig.AWSStorageConfig.Metrics.URL == "" { + level.Error(util.Logger).Log("msg", "WriteScale is enabled but no Metrics URL has been provided") os.Exit(1) } diff --git a/pkg/storage/util_test.go b/pkg/storage/util_test.go index 70849ac9d064a..3f9e42259bfeb 100644 --- a/pkg/storage/util_test.go +++ b/pkg/storage/util_test.go @@ -152,7 +152,13 @@ func (m *mockChunkStore) GetChunkRefs(ctx context.Context, userID string, from, } refs = append(refs, r) } - f, err := chunk.NewChunkFetcher(cache.Config{}, false, m.client) + + cache, err := cache.New(cache.Config{Prefix: "chunks"}) + if err != nil { + panic(err) + } + + f, err := chunk.NewChunkFetcher(cache, false, m.client) if err != nil { panic(err) } diff --git a/production/helm/loki/values.yaml b/production/helm/loki/values.yaml index ea8dd83542c8a..b530729a670b3 100644 --- a/production/helm/loki/values.yaml +++ b/production/helm/loki/values.yaml @@ -63,8 +63,8 @@ config: # consul: # host: "consul:8500" # prefix: "" - # httpclienttimeout: "20s" - # consistentreads: true + # http_client_timeout: "20s" + # consistent_reads: true limits_config: enforce_metric_name: false reject_old_samples: true diff --git a/production/helm/promtail/values.yaml b/production/helm/promtail/values.yaml index 0f33c56705392..ba4711fd1a6fc 100644 --- a/production/helm/promtail/values.yaml +++ b/production/helm/promtail/values.yaml @@ -127,11 +127,11 @@ config: backoff_config: # Initial backoff time between retries - minbackoff: 100ms + min_period: 100ms # Maximum backoff time between 
retries - maxbackoff: 5s + max_period: 5s # Maximum number of retries when sending batches, 0 means infinite retries - maxretries: 20 + max_retries: 20 # The labels to add to any time series or alerts when communicating with loki external_labels: {} diff --git a/production/ksonnet/loki/config.libsonnet b/production/ksonnet/loki/config.libsonnet index a38fe66558ed2..24faefb3b2320 100644 --- a/production/ksonnet/loki/config.libsonnet +++ b/production/ksonnet/loki/config.libsonnet @@ -48,10 +48,10 @@ client_configs: { dynamo: { - dynamodbconfig: {} + if $._config.dynamodb_access_key != '' then { - dynamodb: 'dynamodb://' + $._config.dynamodb_access_key + ':' + $._config.dynamodb_secret_access_key + '@' + $._config.dynamodb_region, + dynamodb: {} + if $._config.dynamodb_access_key != '' then { + dynamodb_url: 'dynamodb://' + $._config.dynamodb_access_key + ':' + $._config.dynamodb_secret_access_key + '@' + $._config.dynamodb_region, } else { - dynamodb: 'dynamodb://' + $._config.dynamodb_region, + dynamodb_url: 'dynamodb://' + $._config.dynamodb_region, }, }, s3: { @@ -107,7 +107,7 @@ max_outstanding_per_tenant: 200, }, frontend_worker: { - address: 'query-frontend.%s.svc.cluster.local:9095' % $._config.namespace, + frontend_address: 'query-frontend.%s.svc.cluster.local:9095' % $._config.namespace, // Limit to N/2 worker threads per frontend, as we have two frontends. parallelism: $._config.querierConcurrency / 2, grpc_client_config: { @@ -139,7 +139,7 @@ reject_old_samples: true, reject_old_samples_max_age: '168h', max_query_length: '12000h', // 500 days - max_streams_per_user: 0, // Disabled in favor of the global limit + max_streams_per_user: 0, // Disabled in favor of the global limit max_global_streams_per_user: 10000, // 10k ingestion_rate_strategy: 'global', ingestion_rate_mb: 10, @@ -159,8 +159,8 @@ store: 'consul', consul: { host: 'consul.%s.svc.cluster.local:8500' % $._config.namespace, - httpclienttimeout: '20s', - consistentreads: true, + http_client_timeout: '20s', + consistent_reads: true, }, }, }, @@ -168,7 +168,6 @@ num_tokens: 512, heartbeat_period: '5s', join_after: '30s', - claim_on_rollout: true, interface_names: ['eth0'], }, }, @@ -277,10 +276,10 @@ store: 'consul', consul: { host: 'consul.%s.svc.cluster.local:8500' % $._config.namespace, - httpclienttimeout: '20s', - consistentreads: false, - watchkeyratelimit: 1, - watchkeyburstsize: 1, + http_client_timeout: '20s', + consistent_reads: false, + watch_rate_limit: 1, + watch_burst_size: 1, }, }, }, diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go deleted file mode 100644 index 7eb5c356a9c41..0000000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/api.go +++ /dev/null @@ -1,5048 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package applicationautoscaling - -import ( - "fmt" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -const opDeleteScalingPolicy = "DeleteScalingPolicy" - -// DeleteScalingPolicyRequest generates a "aws/request.Request" representing the -// client's request for the DeleteScalingPolicy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. 
-// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteScalingPolicy for more information on using the DeleteScalingPolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteScalingPolicyRequest method. -// req, resp := client.DeleteScalingPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DeleteScalingPolicy -func (c *ApplicationAutoScaling) DeleteScalingPolicyRequest(input *DeleteScalingPolicyInput) (req *request.Request, output *DeleteScalingPolicyOutput) { - op := &request.Operation{ - Name: opDeleteScalingPolicy, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteScalingPolicyInput{} - } - - output = &DeleteScalingPolicyOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteScalingPolicy API operation for Application Auto Scaling. -// -// Deletes the specified scaling policy for an Application Auto Scaling scalable -// target. -// -// Deleting a step scaling policy deletes the underlying alarm action, but does -// not delete the CloudWatch alarm associated with the scaling policy, even -// if it no longer has an associated action. -// -// For more information, see Delete a Step Scaling Policy (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html#delete-step-scaling-policy) -// and Delete a Target Tracking Scaling Policy (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html#delete-target-tracking-policy) -// in the Application Auto Scaling User Guide. -// -// To create a scaling policy or update an existing one, see PutScalingPolicy. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Application Auto Scaling's -// API operation DeleteScalingPolicy for usage and error information. -// -// Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// An exception was thrown for a validation issue. Review the available parameters -// for the API request. -// -// * ErrCodeObjectNotFoundException "ObjectNotFoundException" -// The specified object could not be found. For any operation that depends on -// the existence of a scalable target, this exception is thrown if the scalable -// target with the specified service namespace, resource ID, and scalable dimension -// does not exist. For any operation that deletes or deregisters a resource, -// this exception is thrown if the resource cannot be found. -// -// * ErrCodeConcurrentUpdateException "ConcurrentUpdateException" -// Concurrent updates caused an exception, for example, if you request an update -// to an Application Auto Scaling resource that already has a pending update. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an internal error. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DeleteScalingPolicy -func (c *ApplicationAutoScaling) DeleteScalingPolicy(input *DeleteScalingPolicyInput) (*DeleteScalingPolicyOutput, error) { - req, out := c.DeleteScalingPolicyRequest(input) - return out, req.Send() -} - -// DeleteScalingPolicyWithContext is the same as DeleteScalingPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteScalingPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ApplicationAutoScaling) DeleteScalingPolicyWithContext(ctx aws.Context, input *DeleteScalingPolicyInput, opts ...request.Option) (*DeleteScalingPolicyOutput, error) { - req, out := c.DeleteScalingPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeleteScheduledAction = "DeleteScheduledAction" - -// DeleteScheduledActionRequest generates a "aws/request.Request" representing the -// client's request for the DeleteScheduledAction operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeleteScheduledAction for more information on using the DeleteScheduledAction -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeleteScheduledActionRequest method. -// req, resp := client.DeleteScheduledActionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DeleteScheduledAction -func (c *ApplicationAutoScaling) DeleteScheduledActionRequest(input *DeleteScheduledActionInput) (req *request.Request, output *DeleteScheduledActionOutput) { - op := &request.Operation{ - Name: opDeleteScheduledAction, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteScheduledActionInput{} - } - - output = &DeleteScheduledActionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeleteScheduledAction API operation for Application Auto Scaling. -// -// Deletes the specified scheduled action for an Application Auto Scaling scalable -// target. -// -// For more information, see Delete a Scheduled Action (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-scheduled-scaling.html#delete-scheduled-action) -// in the Application Auto Scaling User Guide. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Application Auto Scaling's -// API operation DeleteScheduledAction for usage and error information. 
-// -// Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// An exception was thrown for a validation issue. Review the available parameters -// for the API request. -// -// * ErrCodeObjectNotFoundException "ObjectNotFoundException" -// The specified object could not be found. For any operation that depends on -// the existence of a scalable target, this exception is thrown if the scalable -// target with the specified service namespace, resource ID, and scalable dimension -// does not exist. For any operation that deletes or deregisters a resource, -// this exception is thrown if the resource cannot be found. -// -// * ErrCodeConcurrentUpdateException "ConcurrentUpdateException" -// Concurrent updates caused an exception, for example, if you request an update -// to an Application Auto Scaling resource that already has a pending update. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an internal error. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DeleteScheduledAction -func (c *ApplicationAutoScaling) DeleteScheduledAction(input *DeleteScheduledActionInput) (*DeleteScheduledActionOutput, error) { - req, out := c.DeleteScheduledActionRequest(input) - return out, req.Send() -} - -// DeleteScheduledActionWithContext is the same as DeleteScheduledAction with the addition of -// the ability to pass a context and additional request options. -// -// See DeleteScheduledAction for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ApplicationAutoScaling) DeleteScheduledActionWithContext(ctx aws.Context, input *DeleteScheduledActionInput, opts ...request.Option) (*DeleteScheduledActionOutput, error) { - req, out := c.DeleteScheduledActionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDeregisterScalableTarget = "DeregisterScalableTarget" - -// DeregisterScalableTargetRequest generates a "aws/request.Request" representing the -// client's request for the DeregisterScalableTarget operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DeregisterScalableTarget for more information on using the DeregisterScalableTarget -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DeregisterScalableTargetRequest method. 
-// req, resp := client.DeregisterScalableTargetRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DeregisterScalableTarget -func (c *ApplicationAutoScaling) DeregisterScalableTargetRequest(input *DeregisterScalableTargetInput) (req *request.Request, output *DeregisterScalableTargetOutput) { - op := &request.Operation{ - Name: opDeregisterScalableTarget, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeregisterScalableTargetInput{} - } - - output = &DeregisterScalableTargetOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// DeregisterScalableTarget API operation for Application Auto Scaling. -// -// Deregisters an Application Auto Scaling scalable target. -// -// Deregistering a scalable target deletes the scaling policies that are associated -// with it. -// -// To create a scalable target or update an existing one, see RegisterScalableTarget. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Application Auto Scaling's -// API operation DeregisterScalableTarget for usage and error information. -// -// Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// An exception was thrown for a validation issue. Review the available parameters -// for the API request. -// -// * ErrCodeObjectNotFoundException "ObjectNotFoundException" -// The specified object could not be found. For any operation that depends on -// the existence of a scalable target, this exception is thrown if the scalable -// target with the specified service namespace, resource ID, and scalable dimension -// does not exist. For any operation that deletes or deregisters a resource, -// this exception is thrown if the resource cannot be found. -// -// * ErrCodeConcurrentUpdateException "ConcurrentUpdateException" -// Concurrent updates caused an exception, for example, if you request an update -// to an Application Auto Scaling resource that already has a pending update. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an internal error. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DeregisterScalableTarget -func (c *ApplicationAutoScaling) DeregisterScalableTarget(input *DeregisterScalableTargetInput) (*DeregisterScalableTargetOutput, error) { - req, out := c.DeregisterScalableTargetRequest(input) - return out, req.Send() -} - -// DeregisterScalableTargetWithContext is the same as DeregisterScalableTarget with the addition of -// the ability to pass a context and additional request options. -// -// See DeregisterScalableTarget for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *ApplicationAutoScaling) DeregisterScalableTargetWithContext(ctx aws.Context, input *DeregisterScalableTargetInput, opts ...request.Option) (*DeregisterScalableTargetOutput, error) { - req, out := c.DeregisterScalableTargetRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opDescribeScalableTargets = "DescribeScalableTargets" - -// DescribeScalableTargetsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeScalableTargets operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeScalableTargets for more information on using the DescribeScalableTargets -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeScalableTargetsRequest method. -// req, resp := client.DescribeScalableTargetsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DescribeScalableTargets -func (c *ApplicationAutoScaling) DescribeScalableTargetsRequest(input *DescribeScalableTargetsInput) (req *request.Request, output *DescribeScalableTargetsOutput) { - op := &request.Operation{ - Name: opDescribeScalableTargets, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeScalableTargetsInput{} - } - - output = &DescribeScalableTargetsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeScalableTargets API operation for Application Auto Scaling. -// -// Gets information about the scalable targets in the specified namespace. -// -// You can filter the results using ResourceIds and ScalableDimension. -// -// To create a scalable target or update an existing one, see RegisterScalableTarget. -// If you are no longer using a scalable target, you can deregister it using -// DeregisterScalableTarget. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Application Auto Scaling's -// API operation DescribeScalableTargets for usage and error information. -// -// Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// An exception was thrown for a validation issue. Review the available parameters -// for the API request. -// -// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" -// The next token supplied was invalid. -// -// * ErrCodeConcurrentUpdateException "ConcurrentUpdateException" -// Concurrent updates caused an exception, for example, if you request an update -// to an Application Auto Scaling resource that already has a pending update. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an internal error. 
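A sketch of the ResourceIds/ScalableDimension filtering described above for DescribeScalableTargets, with the same assumed setup as the first sketch; the output field names (ScalableTargets, MinCapacity, MaxCapacity) are taken from the type definitions elsewhere in this file.

    // listTableTargets prints the registered capacity range for one DynamoDB table.
    func listTableTargets(svc *applicationautoscaling.ApplicationAutoScaling) error {
        out, err := svc.DescribeScalableTargets(&applicationautoscaling.DescribeScalableTargetsInput{
            ServiceNamespace: aws.String("dynamodb"),
            ResourceIds:      []*string{aws.String("table/my-table")}, // optional filter
        })
        if err != nil {
            return err
        }
        for _, t := range out.ScalableTargets {
            fmt.Printf("%s: min=%d max=%d\n",
                aws.StringValue(t.ResourceId),
                aws.Int64Value(t.MinCapacity),
                aws.Int64Value(t.MaxCapacity))
        }
        return nil
    }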
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DescribeScalableTargets -func (c *ApplicationAutoScaling) DescribeScalableTargets(input *DescribeScalableTargetsInput) (*DescribeScalableTargetsOutput, error) { - req, out := c.DescribeScalableTargetsRequest(input) - return out, req.Send() -} - -// DescribeScalableTargetsWithContext is the same as DescribeScalableTargets with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeScalableTargets for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ApplicationAutoScaling) DescribeScalableTargetsWithContext(ctx aws.Context, input *DescribeScalableTargetsInput, opts ...request.Option) (*DescribeScalableTargetsOutput, error) { - req, out := c.DescribeScalableTargetsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// DescribeScalableTargetsPages iterates over the pages of a DescribeScalableTargets operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See DescribeScalableTargets method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a DescribeScalableTargets operation. -// pageNum := 0 -// err := client.DescribeScalableTargetsPages(params, -// func(page *applicationautoscaling.DescribeScalableTargetsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *ApplicationAutoScaling) DescribeScalableTargetsPages(input *DescribeScalableTargetsInput, fn func(*DescribeScalableTargetsOutput, bool) bool) error { - return c.DescribeScalableTargetsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// DescribeScalableTargetsPagesWithContext same as DescribeScalableTargetsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ApplicationAutoScaling) DescribeScalableTargetsPagesWithContext(ctx aws.Context, input *DescribeScalableTargetsInput, fn func(*DescribeScalableTargetsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *DescribeScalableTargetsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeScalableTargetsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*DescribeScalableTargetsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opDescribeScalingActivities = "DescribeScalingActivities" - -// DescribeScalingActivitiesRequest generates a "aws/request.Request" representing the -// client's request for the DescribeScalingActivities operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeScalingActivities for more information on using the DescribeScalingActivities -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeScalingActivitiesRequest method. -// req, resp := client.DescribeScalingActivitiesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DescribeScalingActivities -func (c *ApplicationAutoScaling) DescribeScalingActivitiesRequest(input *DescribeScalingActivitiesInput) (req *request.Request, output *DescribeScalingActivitiesOutput) { - op := &request.Operation{ - Name: opDescribeScalingActivities, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeScalingActivitiesInput{} - } - - output = &DescribeScalingActivitiesOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeScalingActivities API operation for Application Auto Scaling. -// -// Provides descriptive information about the scaling activities in the specified -// namespace from the previous six weeks. -// -// You can filter the results using ResourceId and ScalableDimension. -// -// Scaling activities are triggered by CloudWatch alarms that are associated -// with scaling policies. To view the scaling policies for a service namespace, -// see DescribeScalingPolicies. To create a scaling policy or update an existing -// one, see PutScalingPolicy. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Application Auto Scaling's -// API operation DescribeScalingActivities for usage and error information. -// -// Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// An exception was thrown for a validation issue. Review the available parameters -// for the API request. -// -// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" -// The next token supplied was invalid. -// -// * ErrCodeConcurrentUpdateException "ConcurrentUpdateException" -// Concurrent updates caused an exception, for example, if you request an update -// to an Application Auto Scaling resource that already has a pending update. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an internal error. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DescribeScalingActivities -func (c *ApplicationAutoScaling) DescribeScalingActivities(input *DescribeScalingActivitiesInput) (*DescribeScalingActivitiesOutput, error) { - req, out := c.DescribeScalingActivitiesRequest(input) - return out, req.Send() -} - -// DescribeScalingActivitiesWithContext is the same as DescribeScalingActivities with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeScalingActivities for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ApplicationAutoScaling) DescribeScalingActivitiesWithContext(ctx aws.Context, input *DescribeScalingActivitiesInput, opts ...request.Option) (*DescribeScalingActivitiesOutput, error) { - req, out := c.DescribeScalingActivitiesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// DescribeScalingActivitiesPages iterates over the pages of a DescribeScalingActivities operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See DescribeScalingActivities method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a DescribeScalingActivities operation. -// pageNum := 0 -// err := client.DescribeScalingActivitiesPages(params, -// func(page *applicationautoscaling.DescribeScalingActivitiesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *ApplicationAutoScaling) DescribeScalingActivitiesPages(input *DescribeScalingActivitiesInput, fn func(*DescribeScalingActivitiesOutput, bool) bool) error { - return c.DescribeScalingActivitiesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// DescribeScalingActivitiesPagesWithContext same as DescribeScalingActivitiesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ApplicationAutoScaling) DescribeScalingActivitiesPagesWithContext(ctx aws.Context, input *DescribeScalingActivitiesInput, fn func(*DescribeScalingActivitiesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *DescribeScalingActivitiesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeScalingActivitiesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*DescribeScalingActivitiesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opDescribeScalingPolicies = "DescribeScalingPolicies" - -// DescribeScalingPoliciesRequest generates a "aws/request.Request" representing the -// client's request for the DescribeScalingPolicies operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeScalingPolicies for more information on using the DescribeScalingPolicies -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeScalingPoliciesRequest method. -// req, resp := client.DescribeScalingPoliciesRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DescribeScalingPolicies -func (c *ApplicationAutoScaling) DescribeScalingPoliciesRequest(input *DescribeScalingPoliciesInput) (req *request.Request, output *DescribeScalingPoliciesOutput) { - op := &request.Operation{ - Name: opDescribeScalingPolicies, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeScalingPoliciesInput{} - } - - output = &DescribeScalingPoliciesOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeScalingPolicies API operation for Application Auto Scaling. -// -// Describes the Application Auto Scaling scaling policies for the specified -// service namespace. -// -// You can filter the results using ResourceId, ScalableDimension, and PolicyNames. -// -// To create a scaling policy or update an existing one, see PutScalingPolicy. -// If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Application Auto Scaling's -// API operation DescribeScalingPolicies for usage and error information. -// -// Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// An exception was thrown for a validation issue. Review the available parameters -// for the API request. -// -// * ErrCodeFailedResourceAccessException "FailedResourceAccessException" -// Failed access to resources caused an exception. This exception is thrown -// when Application Auto Scaling is unable to retrieve the alarms associated -// with a scaling policy due to a client error, for example, if the role ARN -// specified for a scalable target does not have permission to call the CloudWatch -// DescribeAlarms (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarms.html) -// on your behalf. -// -// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" -// The next token supplied was invalid. -// -// * ErrCodeConcurrentUpdateException "ConcurrentUpdateException" -// Concurrent updates caused an exception, for example, if you request an update -// to an Application Auto Scaling resource that already has a pending update. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an internal error. 
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DescribeScalingPolicies -func (c *ApplicationAutoScaling) DescribeScalingPolicies(input *DescribeScalingPoliciesInput) (*DescribeScalingPoliciesOutput, error) { - req, out := c.DescribeScalingPoliciesRequest(input) - return out, req.Send() -} - -// DescribeScalingPoliciesWithContext is the same as DescribeScalingPolicies with the addition of -// the ability to pass a context and additional request options. -// -// See DescribeScalingPolicies for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ApplicationAutoScaling) DescribeScalingPoliciesWithContext(ctx aws.Context, input *DescribeScalingPoliciesInput, opts ...request.Option) (*DescribeScalingPoliciesOutput, error) { - req, out := c.DescribeScalingPoliciesRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// DescribeScalingPoliciesPages iterates over the pages of a DescribeScalingPolicies operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See DescribeScalingPolicies method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a DescribeScalingPolicies operation. -// pageNum := 0 -// err := client.DescribeScalingPoliciesPages(params, -// func(page *applicationautoscaling.DescribeScalingPoliciesOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *ApplicationAutoScaling) DescribeScalingPoliciesPages(input *DescribeScalingPoliciesInput, fn func(*DescribeScalingPoliciesOutput, bool) bool) error { - return c.DescribeScalingPoliciesPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// DescribeScalingPoliciesPagesWithContext same as DescribeScalingPoliciesPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ApplicationAutoScaling) DescribeScalingPoliciesPagesWithContext(ctx aws.Context, input *DescribeScalingPoliciesInput, fn func(*DescribeScalingPoliciesOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *DescribeScalingPoliciesInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeScalingPoliciesRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*DescribeScalingPoliciesOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opDescribeScheduledActions = "DescribeScheduledActions" - -// DescribeScheduledActionsRequest generates a "aws/request.Request" representing the -// client's request for the DescribeScheduledActions operation. 
The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See DescribeScheduledActions for more information on using the DescribeScheduledActions -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the DescribeScheduledActionsRequest method. -// req, resp := client.DescribeScheduledActionsRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DescribeScheduledActions -func (c *ApplicationAutoScaling) DescribeScheduledActionsRequest(input *DescribeScheduledActionsInput) (req *request.Request, output *DescribeScheduledActionsOutput) { - op := &request.Operation{ - Name: opDescribeScheduledActions, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeScheduledActionsInput{} - } - - output = &DescribeScheduledActionsOutput{} - req = c.newRequest(op, input, output) - return -} - -// DescribeScheduledActions API operation for Application Auto Scaling. -// -// Describes the Application Auto Scaling scheduled actions for the specified -// service namespace. -// -// You can filter the results using the ResourceId, ScalableDimension, and ScheduledActionNames -// parameters. -// -// To create a scheduled action or update an existing one, see PutScheduledAction. -// If you are no longer using a scheduled action, you can delete it using DeleteScheduledAction. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Application Auto Scaling's -// API operation DescribeScheduledActions for usage and error information. -// -// Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// An exception was thrown for a validation issue. Review the available parameters -// for the API request. -// -// * ErrCodeInvalidNextTokenException "InvalidNextTokenException" -// The next token supplied was invalid. -// -// * ErrCodeConcurrentUpdateException "ConcurrentUpdateException" -// Concurrent updates caused an exception, for example, if you request an update -// to an Application Auto Scaling resource that already has a pending update. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an internal error. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/DescribeScheduledActions -func (c *ApplicationAutoScaling) DescribeScheduledActions(input *DescribeScheduledActionsInput) (*DescribeScheduledActionsOutput, error) { - req, out := c.DescribeScheduledActionsRequest(input) - return out, req.Send() -} - -// DescribeScheduledActionsWithContext is the same as DescribeScheduledActions with the addition of -// the ability to pass a context and additional request options. 
-// -// See DescribeScheduledActions for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ApplicationAutoScaling) DescribeScheduledActionsWithContext(ctx aws.Context, input *DescribeScheduledActionsInput, opts ...request.Option) (*DescribeScheduledActionsOutput, error) { - req, out := c.DescribeScheduledActionsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// DescribeScheduledActionsPages iterates over the pages of a DescribeScheduledActions operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See DescribeScheduledActions method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a DescribeScheduledActions operation. -// pageNum := 0 -// err := client.DescribeScheduledActionsPages(params, -// func(page *applicationautoscaling.DescribeScheduledActionsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *ApplicationAutoScaling) DescribeScheduledActionsPages(input *DescribeScheduledActionsInput, fn func(*DescribeScheduledActionsOutput, bool) bool) error { - return c.DescribeScheduledActionsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// DescribeScheduledActionsPagesWithContext same as DescribeScheduledActionsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ApplicationAutoScaling) DescribeScheduledActionsPagesWithContext(ctx aws.Context, input *DescribeScheduledActionsInput, fn func(*DescribeScheduledActionsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *DescribeScheduledActionsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.DescribeScheduledActionsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*DescribeScheduledActionsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opPutScalingPolicy = "PutScalingPolicy" - -// PutScalingPolicyRequest generates a "aws/request.Request" representing the -// client's request for the PutScalingPolicy operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutScalingPolicy for more information on using the PutScalingPolicy -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. 
-// -// -// // Example sending a request using the PutScalingPolicyRequest method. -// req, resp := client.PutScalingPolicyRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/PutScalingPolicy -func (c *ApplicationAutoScaling) PutScalingPolicyRequest(input *PutScalingPolicyInput) (req *request.Request, output *PutScalingPolicyOutput) { - op := &request.Operation{ - Name: opPutScalingPolicy, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PutScalingPolicyInput{} - } - - output = &PutScalingPolicyOutput{} - req = c.newRequest(op, input, output) - return -} - -// PutScalingPolicy API operation for Application Auto Scaling. -// -// Creates or updates a policy for an Application Auto Scaling scalable target. -// -// Each scalable target is identified by a service namespace, resource ID, and -// scalable dimension. A scaling policy applies to the scalable target identified -// by those three attributes. You cannot create a scaling policy until you have -// registered the resource as a scalable target using RegisterScalableTarget. -// -// To update a policy, specify its policy name and the parameters that you want -// to change. Any parameters that you don't specify are not changed by this -// update request. -// -// You can view the scaling policies for a service namespace using DescribeScalingPolicies. -// If you are no longer using a scaling policy, you can delete it using DeleteScalingPolicy. -// -// Multiple scaling policies can be in force at the same time for the same scalable -// target. You can have one or more target tracking scaling policies, one or -// more step scaling policies, or both. However, there is a chance that multiple -// policies could conflict, instructing the scalable target to scale out or -// in at the same time. Application Auto Scaling gives precedence to the policy -// that provides the largest capacity for both scale out and scale in. For example, -// if one policy increases capacity by 3, another policy increases capacity -// by 200 percent, and the current capacity is 10, Application Auto Scaling -// uses the policy with the highest calculated capacity (200% of 10 = 20) and -// scales out to 30. -// -// Learn more about how to work with scaling policies in the Application Auto -// Scaling User Guide (https://docs.aws.amazon.com/autoscaling/application/userguide/what-is-application-auto-scaling.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Application Auto Scaling's -// API operation PutScalingPolicy for usage and error information. -// -// Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// An exception was thrown for a validation issue. Review the available parameters -// for the API request. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// A per-account resource limit is exceeded. For more information, see Application -// Auto Scaling Limits (https://docs.aws.amazon.com/ApplicationAutoScaling/latest/userguide/application-auto-scaling-limits.html). -// -// * ErrCodeObjectNotFoundException "ObjectNotFoundException" -// The specified object could not be found. 
For any operation that depends on -// the existence of a scalable target, this exception is thrown if the scalable -// target with the specified service namespace, resource ID, and scalable dimension -// does not exist. For any operation that deletes or deregisters a resource, -// this exception is thrown if the resource cannot be found. -// -// * ErrCodeConcurrentUpdateException "ConcurrentUpdateException" -// Concurrent updates caused an exception, for example, if you request an update -// to an Application Auto Scaling resource that already has a pending update. -// -// * ErrCodeFailedResourceAccessException "FailedResourceAccessException" -// Failed access to resources caused an exception. This exception is thrown -// when Application Auto Scaling is unable to retrieve the alarms associated -// with a scaling policy due to a client error, for example, if the role ARN -// specified for a scalable target does not have permission to call the CloudWatch -// DescribeAlarms (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarms.html) -// on your behalf. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an internal error. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/PutScalingPolicy -func (c *ApplicationAutoScaling) PutScalingPolicy(input *PutScalingPolicyInput) (*PutScalingPolicyOutput, error) { - req, out := c.PutScalingPolicyRequest(input) - return out, req.Send() -} - -// PutScalingPolicyWithContext is the same as PutScalingPolicy with the addition of -// the ability to pass a context and additional request options. -// -// See PutScalingPolicy for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ApplicationAutoScaling) PutScalingPolicyWithContext(ctx aws.Context, input *PutScalingPolicyInput, opts ...request.Option) (*PutScalingPolicyOutput, error) { - req, out := c.PutScalingPolicyRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opPutScheduledAction = "PutScheduledAction" - -// PutScheduledActionRequest generates a "aws/request.Request" representing the -// client's request for the PutScheduledAction operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See PutScheduledAction for more information on using the PutScheduledAction -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the PutScheduledActionRequest method. 
-// req, resp := client.PutScheduledActionRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/PutScheduledAction -func (c *ApplicationAutoScaling) PutScheduledActionRequest(input *PutScheduledActionInput) (req *request.Request, output *PutScheduledActionOutput) { - op := &request.Operation{ - Name: opPutScheduledAction, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PutScheduledActionInput{} - } - - output = &PutScheduledActionOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// PutScheduledAction API operation for Application Auto Scaling. -// -// Creates or updates a scheduled action for an Application Auto Scaling scalable -// target. -// -// Each scalable target is identified by a service namespace, resource ID, and -// scalable dimension. A scheduled action applies to the scalable target identified -// by those three attributes. You cannot create a scheduled action until you -// have registered the resource as a scalable target using RegisterScalableTarget. -// -// To update an action, specify its name and the parameters that you want to -// change. If you don't specify start and end times, the old values are deleted. -// Any other parameters that you don't specify are not changed by this update -// request. -// -// You can view the scheduled actions using DescribeScheduledActions. If you -// are no longer using a scheduled action, you can delete it using DeleteScheduledAction. -// -// Learn more about how to work with scheduled actions in the Application Auto -// Scaling User Guide (https://docs.aws.amazon.com/autoscaling/application/userguide/what-is-application-auto-scaling.html). -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Application Auto Scaling's -// API operation PutScheduledAction for usage and error information. -// -// Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// An exception was thrown for a validation issue. Review the available parameters -// for the API request. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// A per-account resource limit is exceeded. For more information, see Application -// Auto Scaling Limits (https://docs.aws.amazon.com/ApplicationAutoScaling/latest/userguide/application-auto-scaling-limits.html). -// -// * ErrCodeObjectNotFoundException "ObjectNotFoundException" -// The specified object could not be found. For any operation that depends on -// the existence of a scalable target, this exception is thrown if the scalable -// target with the specified service namespace, resource ID, and scalable dimension -// does not exist. For any operation that deletes or deregisters a resource, -// this exception is thrown if the resource cannot be found. -// -// * ErrCodeConcurrentUpdateException "ConcurrentUpdateException" -// Concurrent updates caused an exception, for example, if you request an update -// to an Application Auto Scaling resource that already has a pending update. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an internal error. 
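A sketch for the PutScheduledAction operation documented above, with the same assumed setup. The ScalableTargetAction shape and the six-field cron syntax are assumptions based on the rest of this file and the linked User Guide; the action name and capacities are invented.

    // scaleUpWeekdays creates or updates a scheduled action that widens the
    // capacity range every weekday morning (per the docs above, unspecified
    // parameters other than start/end times are left unchanged on update).
    func scaleUpWeekdays(svc *applicationautoscaling.ApplicationAutoScaling) error {
        _, err := svc.PutScheduledAction(&applicationautoscaling.PutScheduledActionInput{
            ServiceNamespace:    aws.String("dynamodb"),
            ResourceId:          aws.String("table/my-table"),
            ScalableDimension:   aws.String("dynamodb:table:ReadCapacityUnits"),
            ScheduledActionName: aws.String("scale-up-weekdays"),
            Schedule:            aws.String("cron(0 9 ? * MON-FRI *)"),
            ScalableTargetAction: &applicationautoscaling.ScalableTargetAction{
                MinCapacity: aws.Int64(20),
                MaxCapacity: aws.Int64(100),
            },
        })
        return err
    }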
-// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/PutScheduledAction -func (c *ApplicationAutoScaling) PutScheduledAction(input *PutScheduledActionInput) (*PutScheduledActionOutput, error) { - req, out := c.PutScheduledActionRequest(input) - return out, req.Send() -} - -// PutScheduledActionWithContext is the same as PutScheduledAction with the addition of -// the ability to pass a context and additional request options. -// -// See PutScheduledAction for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ApplicationAutoScaling) PutScheduledActionWithContext(ctx aws.Context, input *PutScheduledActionInput, opts ...request.Option) (*PutScheduledActionOutput, error) { - req, out := c.PutScheduledActionRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opRegisterScalableTarget = "RegisterScalableTarget" - -// RegisterScalableTargetRequest generates a "aws/request.Request" representing the -// client's request for the RegisterScalableTarget operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. -// -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. -// -// See RegisterScalableTarget for more information on using the RegisterScalableTarget -// API call, and error handling. -// -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. -// -// -// // Example sending a request using the RegisterScalableTargetRequest method. -// req, resp := client.RegisterScalableTargetRequest(params) -// -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/RegisterScalableTarget -func (c *ApplicationAutoScaling) RegisterScalableTargetRequest(input *RegisterScalableTargetInput) (req *request.Request, output *RegisterScalableTargetOutput) { - op := &request.Operation{ - Name: opRegisterScalableTarget, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &RegisterScalableTargetInput{} - } - - output = &RegisterScalableTargetOutput{} - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) - return -} - -// RegisterScalableTarget API operation for Application Auto Scaling. -// -// Registers or updates a scalable target. A scalable target is a resource that -// Application Auto Scaling can scale out and scale in. Scalable targets are -// uniquely identified by the combination of resource ID, scalable dimension, -// and namespace. -// -// When you register a new scalable target, you must specify values for minimum -// and maximum capacity. Application Auto Scaling will not scale capacity to -// values that are outside of this range. -// -// To update a scalable target, specify the parameter that you want to change -// as well as the following parameters that identify the scalable target: resource -// ID, scalable dimension, and namespace. 
Any parameters that you don't specify -// are not changed by this update request. -// -// After you register a scalable target, you do not need to register it again -// to use other Application Auto Scaling operations. To see which resources -// have been registered, use DescribeScalableTargets. You can also view the -// scaling policies for a service namespace by using DescribeScalableTargets. -// -// If you no longer need a scalable target, you can deregister it by using DeregisterScalableTarget. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. -// -// See the AWS API reference guide for Application Auto Scaling's -// API operation RegisterScalableTarget for usage and error information. -// -// Returned Error Codes: -// * ErrCodeValidationException "ValidationException" -// An exception was thrown for a validation issue. Review the available parameters -// for the API request. -// -// * ErrCodeLimitExceededException "LimitExceededException" -// A per-account resource limit is exceeded. For more information, see Application -// Auto Scaling Limits (https://docs.aws.amazon.com/ApplicationAutoScaling/latest/userguide/application-auto-scaling-limits.html). -// -// * ErrCodeConcurrentUpdateException "ConcurrentUpdateException" -// Concurrent updates caused an exception, for example, if you request an update -// to an Application Auto Scaling resource that already has a pending update. -// -// * ErrCodeInternalServiceException "InternalServiceException" -// The service encountered an internal error. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06/RegisterScalableTarget -func (c *ApplicationAutoScaling) RegisterScalableTarget(input *RegisterScalableTargetInput) (*RegisterScalableTargetOutput, error) { - req, out := c.RegisterScalableTargetRequest(input) - return out, req.Send() -} - -// RegisterScalableTargetWithContext is the same as RegisterScalableTarget with the addition of -// the ability to pass a context and additional request options. -// -// See RegisterScalableTarget for details on how to use this API operation. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *ApplicationAutoScaling) RegisterScalableTargetWithContext(ctx aws.Context, input *RegisterScalableTargetInput, opts ...request.Option) (*RegisterScalableTargetOutput, error) { - req, out := c.RegisterScalableTargetRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -// Represents a CloudWatch alarm associated with a scaling policy. -type Alarm struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the alarm. - // - // AlarmARN is a required field - AlarmARN *string `type:"string" required:"true"` - - // The name of the alarm. - // - // AlarmName is a required field - AlarmName *string `type:"string" required:"true"` -} - -// String returns the string representation -func (s Alarm) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Alarm) GoString() string { - return s.String() -} - -// SetAlarmARN sets the AlarmARN field's value. 
-func (s *Alarm) SetAlarmARN(v string) *Alarm { - s.AlarmARN = &v - return s -} - -// SetAlarmName sets the AlarmName field's value. -func (s *Alarm) SetAlarmName(v string) *Alarm { - s.AlarmName = &v - return s -} - -// Represents a CloudWatch metric of your choosing for a target tracking scaling -// policy to use with Application Auto Scaling. -// -// To create your customized metric specification: -// -// * Add values for each required parameter from CloudWatch. You can use -// an existing metric, or a new metric that you create. To use your own metric, -// you must first publish the metric to CloudWatch. For more information, -// see Publish Custom Metrics (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/publishingMetrics.html) -// in the Amazon CloudWatch User Guide. -// -// * Choose a metric that changes proportionally with capacity. The value -// of the metric should increase or decrease in inverse proportion to the -// number of capacity units. That is, the value of the metric should decrease -// when capacity increases. -// -// For more information about CloudWatch, see Amazon CloudWatch Concepts (https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cloudwatch_concepts.html). -type CustomizedMetricSpecification struct { - _ struct{} `type:"structure"` - - // The dimensions of the metric. - // - // Conditional: If you published your metric with dimensions, you must specify - // the same dimensions in your scaling policy. - Dimensions []*MetricDimension `type:"list"` - - // The name of the metric. - // - // MetricName is a required field - MetricName *string `type:"string" required:"true"` - - // The namespace of the metric. - // - // Namespace is a required field - Namespace *string `type:"string" required:"true"` - - // The statistic of the metric. - // - // Statistic is a required field - Statistic *string `type:"string" required:"true" enum:"MetricStatistic"` - - // The unit of the metric. - Unit *string `type:"string"` -} - -// String returns the string representation -func (s CustomizedMetricSpecification) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CustomizedMetricSpecification) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CustomizedMetricSpecification) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CustomizedMetricSpecification"} - if s.MetricName == nil { - invalidParams.Add(request.NewErrParamRequired("MetricName")) - } - if s.Namespace == nil { - invalidParams.Add(request.NewErrParamRequired("Namespace")) - } - if s.Statistic == nil { - invalidParams.Add(request.NewErrParamRequired("Statistic")) - } - if s.Dimensions != nil { - for i, v := range s.Dimensions { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Dimensions", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetDimensions sets the Dimensions field's value. -func (s *CustomizedMetricSpecification) SetDimensions(v []*MetricDimension) *CustomizedMetricSpecification { - s.Dimensions = v - return s -} - -// SetMetricName sets the MetricName field's value. -func (s *CustomizedMetricSpecification) SetMetricName(v string) *CustomizedMetricSpecification { - s.MetricName = &v - return s -} - -// SetNamespace sets the Namespace field's value. 
-func (s *CustomizedMetricSpecification) SetNamespace(v string) *CustomizedMetricSpecification { - s.Namespace = &v - return s -} - -// SetStatistic sets the Statistic field's value. -func (s *CustomizedMetricSpecification) SetStatistic(v string) *CustomizedMetricSpecification { - s.Statistic = &v - return s -} - -// SetUnit sets the Unit field's value. -func (s *CustomizedMetricSpecification) SetUnit(v string) *CustomizedMetricSpecification { - s.Unit = &v - return s -} - -type DeleteScalingPolicyInput struct { - _ struct{} `type:"structure"` - - // The name of the scaling policy. - // - // PolicyName is a required field - PolicyName *string `min:"1" type:"string" required:"true"` - - // The identifier of the resource associated with the scalable target. This - // string consists of the resource type and unique identifier. - // - // * ECS service - The resource type is service and the unique identifier - // is the cluster name and service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - The resource type is instancegroup and the unique identifier - // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. - // - // * AppStream 2.0 fleet - The resource type is fleet and the unique identifier - // is the fleet name. Example: fleet/sample-fleet. - // - // * DynamoDB table - The resource type is table and the unique identifier - // is the table name. Example: table/my-table. - // - // * DynamoDB global secondary index - The resource type is index and the - // unique identifier is the index name. Example: table/my-table/index/my-table-index. - // - // * Aurora DB cluster - The resource type is cluster and the unique identifier - // is the cluster name. Example: cluster:my-db-cluster. - // - // * Amazon SageMaker endpoint variant - The resource type is variant and - // the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. - // - // * Custom resources are not supported with a resource type. This parameter - // must specify the OutputValue from the CloudFormation template stack used - // to access the resources. The unique identifier is defined by the service - // provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). - // - // * Amazon Comprehend document classification endpoint - The resource type - // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. - // - // * Lambda provisioned concurrency - The resource type is function and the - // unique identifier is the function name with a function version or alias - // name suffix that is not $LATEST. Example: function:my-function:prod or - // function:my-function:1. - // - // ResourceId is a required field - ResourceId *string `min:"1" type:"string" required:"true"` - - // The scalable dimension. This string consists of the service namespace, resource - // type, and scaling property. - // - // * ecs:service:DesiredCount - The desired task count of an ECS service. - // - // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // Fleet request. - // - // * elasticmapreduce:instancegroup:InstanceCount - The instance count of - // an EMR Instance Group. 
- // - // * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream - // 2.0 fleet. - // - // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB table. - // - // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB table. - // - // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB global secondary index. - // - // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB global secondary index. - // - // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora - // DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible - // edition. - // - // * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances - // for an Amazon SageMaker model endpoint variant. - // - // * custom-resource:ResourceType:Property - The scalable dimension for a - // custom resource provided by your own application or service. - // - // * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The - // number of inference units for an Amazon Comprehend document classification - // endpoint. - // - // * lambda:function:ProvisionedConcurrency - The provisioned concurrency - // for a Lambda function. - // - // ScalableDimension is a required field - ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` - - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. - // - // ServiceNamespace is a required field - ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` -} - -// String returns the string representation -func (s DeleteScalingPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteScalingPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteScalingPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteScalingPolicyInput"} - if s.PolicyName == nil { - invalidParams.Add(request.NewErrParamRequired("PolicyName")) - } - if s.PolicyName != nil && len(*s.PolicyName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) - } - if s.ResourceId == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceId")) - } - if s.ResourceId != nil && len(*s.ResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) - } - if s.ScalableDimension == nil { - invalidParams.Add(request.NewErrParamRequired("ScalableDimension")) - } - if s.ServiceNamespace == nil { - invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPolicyName sets the PolicyName field's value. -func (s *DeleteScalingPolicyInput) SetPolicyName(v string) *DeleteScalingPolicyInput { - s.PolicyName = &v - return s -} - -// SetResourceId sets the ResourceId field's value. 
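// Illustrative aside (editor's sketch, not part of the removed vendored file):
// calling DeleteScalingPolicy with the identifier formats documented above for
// a DynamoDB table ("table/my-table", "dynamodb:table:ReadCapacityUnits",
// namespace "dynamodb"). The policy and table names are hypothetical.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/applicationautoscaling"
)

func main() {
	svc := applicationautoscaling.New(session.Must(session.NewSession()))

	// All four fields are required; Validate (and the service) rejects the
	// request if any is missing.
	_, err := svc.DeleteScalingPolicy(&applicationautoscaling.DeleteScalingPolicyInput{
		PolicyName:        aws.String("my-table-read-scaling"),
		ServiceNamespace:  aws.String("dynamodb"),
		ResourceId:        aws.String("table/my-table"),
		ScalableDimension: aws.String("dynamodb:table:ReadCapacityUnits"),
	})
	if err != nil {
		log.Fatalf("DeleteScalingPolicy failed: %v", err)
	}
}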
-func (s *DeleteScalingPolicyInput) SetResourceId(v string) *DeleteScalingPolicyInput { - s.ResourceId = &v - return s -} - -// SetScalableDimension sets the ScalableDimension field's value. -func (s *DeleteScalingPolicyInput) SetScalableDimension(v string) *DeleteScalingPolicyInput { - s.ScalableDimension = &v - return s -} - -// SetServiceNamespace sets the ServiceNamespace field's value. -func (s *DeleteScalingPolicyInput) SetServiceNamespace(v string) *DeleteScalingPolicyInput { - s.ServiceNamespace = &v - return s -} - -type DeleteScalingPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteScalingPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteScalingPolicyOutput) GoString() string { - return s.String() -} - -type DeleteScheduledActionInput struct { - _ struct{} `type:"structure"` - - // The identifier of the resource associated with the scheduled action. This - // string consists of the resource type and unique identifier. - // - // * ECS service - The resource type is service and the unique identifier - // is the cluster name and service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - The resource type is instancegroup and the unique identifier - // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. - // - // * AppStream 2.0 fleet - The resource type is fleet and the unique identifier - // is the fleet name. Example: fleet/sample-fleet. - // - // * DynamoDB table - The resource type is table and the unique identifier - // is the table name. Example: table/my-table. - // - // * DynamoDB global secondary index - The resource type is index and the - // unique identifier is the index name. Example: table/my-table/index/my-table-index. - // - // * Aurora DB cluster - The resource type is cluster and the unique identifier - // is the cluster name. Example: cluster:my-db-cluster. - // - // * Amazon SageMaker endpoint variant - The resource type is variant and - // the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. - // - // * Custom resources are not supported with a resource type. This parameter - // must specify the OutputValue from the CloudFormation template stack used - // to access the resources. The unique identifier is defined by the service - // provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). - // - // * Amazon Comprehend document classification endpoint - The resource type - // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. - // - // * Lambda provisioned concurrency - The resource type is function and the - // unique identifier is the function name with a function version or alias - // name suffix that is not $LATEST. Example: function:my-function:prod or - // function:my-function:1. - // - // ResourceId is a required field - ResourceId *string `min:"1" type:"string" required:"true"` - - // The scalable dimension. This string consists of the service namespace, resource - // type, and scaling property. 
- // - // * ecs:service:DesiredCount - The desired task count of an ECS service. - // - // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // Fleet request. - // - // * elasticmapreduce:instancegroup:InstanceCount - The instance count of - // an EMR Instance Group. - // - // * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream - // 2.0 fleet. - // - // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB table. - // - // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB table. - // - // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB global secondary index. - // - // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB global secondary index. - // - // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora - // DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible - // edition. - // - // * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances - // for an Amazon SageMaker model endpoint variant. - // - // * custom-resource:ResourceType:Property - The scalable dimension for a - // custom resource provided by your own application or service. - // - // * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The - // number of inference units for an Amazon Comprehend document classification - // endpoint. - // - // * lambda:function:ProvisionedConcurrency - The provisioned concurrency - // for a Lambda function. - // - // ScalableDimension is a required field - ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` - - // The name of the scheduled action. - // - // ScheduledActionName is a required field - ScheduledActionName *string `min:"1" type:"string" required:"true"` - - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. - // - // ServiceNamespace is a required field - ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` -} - -// String returns the string representation -func (s DeleteScheduledActionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteScheduledActionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
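// Illustrative aside (editor's sketch, not part of the removed vendored file):
// deleting a scheduled action and distinguishing "already gone" from other
// failures via the SDK's typed errors; ErrCodeObjectNotFoundException is the
// constant this package's errors.go defines. The action and fleet names are
// hypothetical.

package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/awserr"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/applicationautoscaling"
)

func main() {
	svc := applicationautoscaling.New(session.Must(session.NewSession()))

	_, err := svc.DeleteScheduledAction(&applicationautoscaling.DeleteScheduledActionInput{
		ServiceNamespace:    aws.String("appstream"),
		ScheduledActionName: aws.String("nightly-scale-down"),
		ResourceId:          aws.String("fleet/sample-fleet"),
		ScalableDimension:   aws.String("appstream:fleet:DesiredCapacity"),
	})
	if aerr, ok := err.(awserr.Error); ok &&
		aerr.Code() == applicationautoscaling.ErrCodeObjectNotFoundException {
		log.Println("scheduled action was already deleted")
	} else if err != nil {
		log.Fatalf("DeleteScheduledAction failed: %v", err)
	}
}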
-func (s *DeleteScheduledActionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteScheduledActionInput"} - if s.ResourceId == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceId")) - } - if s.ResourceId != nil && len(*s.ResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) - } - if s.ScalableDimension == nil { - invalidParams.Add(request.NewErrParamRequired("ScalableDimension")) - } - if s.ScheduledActionName == nil { - invalidParams.Add(request.NewErrParamRequired("ScheduledActionName")) - } - if s.ScheduledActionName != nil && len(*s.ScheduledActionName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ScheduledActionName", 1)) - } - if s.ServiceNamespace == nil { - invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceId sets the ResourceId field's value. -func (s *DeleteScheduledActionInput) SetResourceId(v string) *DeleteScheduledActionInput { - s.ResourceId = &v - return s -} - -// SetScalableDimension sets the ScalableDimension field's value. -func (s *DeleteScheduledActionInput) SetScalableDimension(v string) *DeleteScheduledActionInput { - s.ScalableDimension = &v - return s -} - -// SetScheduledActionName sets the ScheduledActionName field's value. -func (s *DeleteScheduledActionInput) SetScheduledActionName(v string) *DeleteScheduledActionInput { - s.ScheduledActionName = &v - return s -} - -// SetServiceNamespace sets the ServiceNamespace field's value. -func (s *DeleteScheduledActionInput) SetServiceNamespace(v string) *DeleteScheduledActionInput { - s.ServiceNamespace = &v - return s -} - -type DeleteScheduledActionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteScheduledActionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteScheduledActionOutput) GoString() string { - return s.String() -} - -type DeregisterScalableTargetInput struct { - _ struct{} `type:"structure"` - - // The identifier of the resource associated with the scalable target. This - // string consists of the resource type and unique identifier. - // - // * ECS service - The resource type is service and the unique identifier - // is the cluster name and service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - The resource type is instancegroup and the unique identifier - // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. - // - // * AppStream 2.0 fleet - The resource type is fleet and the unique identifier - // is the fleet name. Example: fleet/sample-fleet. - // - // * DynamoDB table - The resource type is table and the unique identifier - // is the table name. Example: table/my-table. - // - // * DynamoDB global secondary index - The resource type is index and the - // unique identifier is the index name. Example: table/my-table/index/my-table-index. - // - // * Aurora DB cluster - The resource type is cluster and the unique identifier - // is the cluster name. Example: cluster:my-db-cluster. 
- // - // * Amazon SageMaker endpoint variant - The resource type is variant and - // the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. - // - // * Custom resources are not supported with a resource type. This parameter - // must specify the OutputValue from the CloudFormation template stack used - // to access the resources. The unique identifier is defined by the service - // provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). - // - // * Amazon Comprehend document classification endpoint - The resource type - // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. - // - // * Lambda provisioned concurrency - The resource type is function and the - // unique identifier is the function name with a function version or alias - // name suffix that is not $LATEST. Example: function:my-function:prod or - // function:my-function:1. - // - // ResourceId is a required field - ResourceId *string `min:"1" type:"string" required:"true"` - - // The scalable dimension associated with the scalable target. This string consists - // of the service namespace, resource type, and scaling property. - // - // * ecs:service:DesiredCount - The desired task count of an ECS service. - // - // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // Fleet request. - // - // * elasticmapreduce:instancegroup:InstanceCount - The instance count of - // an EMR Instance Group. - // - // * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream - // 2.0 fleet. - // - // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB table. - // - // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB table. - // - // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB global secondary index. - // - // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB global secondary index. - // - // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora - // DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible - // edition. - // - // * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances - // for an Amazon SageMaker model endpoint variant. - // - // * custom-resource:ResourceType:Property - The scalable dimension for a - // custom resource provided by your own application or service. - // - // * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The - // number of inference units for an Amazon Comprehend document classification - // endpoint. - // - // * lambda:function:ProvisionedConcurrency - The provisioned concurrency - // for a Lambda function. - // - // ScalableDimension is a required field - ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` - - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. 
- // - // ServiceNamespace is a required field - ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` -} - -// String returns the string representation -func (s DeregisterScalableTargetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeregisterScalableTargetInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeregisterScalableTargetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeregisterScalableTargetInput"} - if s.ResourceId == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceId")) - } - if s.ResourceId != nil && len(*s.ResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) - } - if s.ScalableDimension == nil { - invalidParams.Add(request.NewErrParamRequired("ScalableDimension")) - } - if s.ServiceNamespace == nil { - invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetResourceId sets the ResourceId field's value. -func (s *DeregisterScalableTargetInput) SetResourceId(v string) *DeregisterScalableTargetInput { - s.ResourceId = &v - return s -} - -// SetScalableDimension sets the ScalableDimension field's value. -func (s *DeregisterScalableTargetInput) SetScalableDimension(v string) *DeregisterScalableTargetInput { - s.ScalableDimension = &v - return s -} - -// SetServiceNamespace sets the ServiceNamespace field's value. -func (s *DeregisterScalableTargetInput) SetServiceNamespace(v string) *DeregisterScalableTargetInput { - s.ServiceNamespace = &v - return s -} - -type DeregisterScalableTargetOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeregisterScalableTargetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeregisterScalableTargetOutput) GoString() string { - return s.String() -} - -type DescribeScalableTargetsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of scalable targets. This value can be between 1 and 50. - // The default value is 50. - // - // If this parameter is used, the operation returns up to MaxResults results - // at a time, along with a NextToken value. To get the next set of results, - // include the NextToken value in a subsequent call. If this parameter is not - // used, the operation returns up to 50 results and a NextToken value, if applicable. - MaxResults *int64 `type:"integer"` - - // The token for the next set of results. - NextToken *string `type:"string"` - - // The identifier of the resource associated with the scalable target. This - // string consists of the resource type and unique identifier. If you specify - // a scalable dimension, you must also specify a resource ID. - // - // * ECS service - The resource type is service and the unique identifier - // is the cluster name and service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - The resource type is instancegroup and the unique identifier - // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. 
- // - // * AppStream 2.0 fleet - The resource type is fleet and the unique identifier - // is the fleet name. Example: fleet/sample-fleet. - // - // * DynamoDB table - The resource type is table and the unique identifier - // is the table name. Example: table/my-table. - // - // * DynamoDB global secondary index - The resource type is index and the - // unique identifier is the index name. Example: table/my-table/index/my-table-index. - // - // * Aurora DB cluster - The resource type is cluster and the unique identifier - // is the cluster name. Example: cluster:my-db-cluster. - // - // * Amazon SageMaker endpoint variant - The resource type is variant and - // the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. - // - // * Custom resources are not supported with a resource type. This parameter - // must specify the OutputValue from the CloudFormation template stack used - // to access the resources. The unique identifier is defined by the service - // provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). - // - // * Amazon Comprehend document classification endpoint - The resource type - // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. - // - // * Lambda provisioned concurrency - The resource type is function and the - // unique identifier is the function name with a function version or alias - // name suffix that is not $LATEST. Example: function:my-function:prod or - // function:my-function:1. - ResourceIds []*string `type:"list"` - - // The scalable dimension associated with the scalable target. This string consists - // of the service namespace, resource type, and scaling property. If you specify - // a scalable dimension, you must also specify a resource ID. - // - // * ecs:service:DesiredCount - The desired task count of an ECS service. - // - // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // Fleet request. - // - // * elasticmapreduce:instancegroup:InstanceCount - The instance count of - // an EMR Instance Group. - // - // * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream - // 2.0 fleet. - // - // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB table. - // - // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB table. - // - // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB global secondary index. - // - // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB global secondary index. - // - // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora - // DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible - // edition. - // - // * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances - // for an Amazon SageMaker model endpoint variant. - // - // * custom-resource:ResourceType:Property - The scalable dimension for a - // custom resource provided by your own application or service. - // - // * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The - // number of inference units for an Amazon Comprehend document classification - // endpoint. - // - // * lambda:function:ProvisionedConcurrency - The provisioned concurrency - // for a Lambda function. 
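// Illustrative aside (editor's sketch, not part of the removed vendored file):
// looking up one scalable target by filtering on ResourceIds plus the matching
// ScalableDimension, as the field documentation above requires. The table
// name is a hypothetical placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/applicationautoscaling"
)

func main() {
	svc := applicationautoscaling.New(session.Must(session.NewSession()))

	// Only ServiceNamespace is required; ResourceIds and ScalableDimension
	// narrow the result, and a scalable dimension may only be given together
	// with a resource ID.
	out, err := svc.DescribeScalableTargets(&applicationautoscaling.DescribeScalableTargetsInput{
		ServiceNamespace:  aws.String("dynamodb"),
		ResourceIds:       []*string{aws.String("table/my-table")},
		ScalableDimension: aws.String("dynamodb:table:ReadCapacityUnits"),
	})
	if err != nil {
		log.Fatal(err)
	}
	for _, t := range out.ScalableTargets {
		fmt.Println(t) // String() pretty-prints via awsutil.Prettify
	}
}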
- ScalableDimension *string `type:"string" enum:"ScalableDimension"` - - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. - // - // ServiceNamespace is a required field - ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` -} - -// String returns the string representation -func (s DescribeScalableTargetsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeScalableTargetsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeScalableTargetsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeScalableTargetsInput"} - if s.ServiceNamespace == nil { - invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *DescribeScalableTargetsInput) SetMaxResults(v int64) *DescribeScalableTargetsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeScalableTargetsInput) SetNextToken(v string) *DescribeScalableTargetsInput { - s.NextToken = &v - return s -} - -// SetResourceIds sets the ResourceIds field's value. -func (s *DescribeScalableTargetsInput) SetResourceIds(v []*string) *DescribeScalableTargetsInput { - s.ResourceIds = v - return s -} - -// SetScalableDimension sets the ScalableDimension field's value. -func (s *DescribeScalableTargetsInput) SetScalableDimension(v string) *DescribeScalableTargetsInput { - s.ScalableDimension = &v - return s -} - -// SetServiceNamespace sets the ServiceNamespace field's value. -func (s *DescribeScalableTargetsInput) SetServiceNamespace(v string) *DescribeScalableTargetsInput { - s.ServiceNamespace = &v - return s -} - -type DescribeScalableTargetsOutput struct { - _ struct{} `type:"structure"` - - // The token required to get the next set of results. This value is null if - // there are no more results to return. - NextToken *string `type:"string"` - - // The scalable targets that match the request parameters. - ScalableTargets []*ScalableTarget `type:"list"` -} - -// String returns the string representation -func (s DescribeScalableTargetsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeScalableTargetsOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeScalableTargetsOutput) SetNextToken(v string) *DescribeScalableTargetsOutput { - s.NextToken = &v - return s -} - -// SetScalableTargets sets the ScalableTargets field's value. -func (s *DescribeScalableTargetsOutput) SetScalableTargets(v []*ScalableTarget) *DescribeScalableTargetsOutput { - s.ScalableTargets = v - return s -} - -type DescribeScalingActivitiesInput struct { - _ struct{} `type:"structure"` - - // The maximum number of scalable targets. This value can be between 1 and 50. - // The default value is 50. 
- // - // If this parameter is used, the operation returns up to MaxResults results - // at a time, along with a NextToken value. To get the next set of results, - // include the NextToken value in a subsequent call. If this parameter is not - // used, the operation returns up to 50 results and a NextToken value, if applicable. - MaxResults *int64 `type:"integer"` - - // The token for the next set of results. - NextToken *string `type:"string"` - - // The identifier of the resource associated with the scaling activity. This - // string consists of the resource type and unique identifier. If you specify - // a scalable dimension, you must also specify a resource ID. - // - // * ECS service - The resource type is service and the unique identifier - // is the cluster name and service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - The resource type is instancegroup and the unique identifier - // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. - // - // * AppStream 2.0 fleet - The resource type is fleet and the unique identifier - // is the fleet name. Example: fleet/sample-fleet. - // - // * DynamoDB table - The resource type is table and the unique identifier - // is the table name. Example: table/my-table. - // - // * DynamoDB global secondary index - The resource type is index and the - // unique identifier is the index name. Example: table/my-table/index/my-table-index. - // - // * Aurora DB cluster - The resource type is cluster and the unique identifier - // is the cluster name. Example: cluster:my-db-cluster. - // - // * Amazon SageMaker endpoint variant - The resource type is variant and - // the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. - // - // * Custom resources are not supported with a resource type. This parameter - // must specify the OutputValue from the CloudFormation template stack used - // to access the resources. The unique identifier is defined by the service - // provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). - // - // * Amazon Comprehend document classification endpoint - The resource type - // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. - // - // * Lambda provisioned concurrency - The resource type is function and the - // unique identifier is the function name with a function version or alias - // name suffix that is not $LATEST. Example: function:my-function:prod or - // function:my-function:1. - ResourceId *string `min:"1" type:"string"` - - // The scalable dimension. This string consists of the service namespace, resource - // type, and scaling property. If you specify a scalable dimension, you must - // also specify a resource ID. - // - // * ecs:service:DesiredCount - The desired task count of an ECS service. - // - // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // Fleet request. - // - // * elasticmapreduce:instancegroup:InstanceCount - The instance count of - // an EMR Instance Group. - // - // * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream - // 2.0 fleet. 
- // - // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB table. - // - // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB table. - // - // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB global secondary index. - // - // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB global secondary index. - // - // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora - // DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible - // edition. - // - // * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances - // for an Amazon SageMaker model endpoint variant. - // - // * custom-resource:ResourceType:Property - The scalable dimension for a - // custom resource provided by your own application or service. - // - // * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The - // number of inference units for an Amazon Comprehend document classification - // endpoint. - // - // * lambda:function:ProvisionedConcurrency - The provisioned concurrency - // for a Lambda function. - ScalableDimension *string `type:"string" enum:"ScalableDimension"` - - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. - // - // ServiceNamespace is a required field - ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` -} - -// String returns the string representation -func (s DescribeScalingActivitiesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeScalingActivitiesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeScalingActivitiesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeScalingActivitiesInput"} - if s.ResourceId != nil && len(*s.ResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) - } - if s.ServiceNamespace == nil { - invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *DescribeScalingActivitiesInput) SetMaxResults(v int64) *DescribeScalingActivitiesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeScalingActivitiesInput) SetNextToken(v string) *DescribeScalingActivitiesInput { - s.NextToken = &v - return s -} - -// SetResourceId sets the ResourceId field's value. -func (s *DescribeScalingActivitiesInput) SetResourceId(v string) *DescribeScalingActivitiesInput { - s.ResourceId = &v - return s -} - -// SetScalableDimension sets the ScalableDimension field's value. -func (s *DescribeScalingActivitiesInput) SetScalableDimension(v string) *DescribeScalingActivitiesInput { - s.ScalableDimension = &v - return s -} - -// SetServiceNamespace sets the ServiceNamespace field's value. 
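// Illustrative aside (editor's sketch, not part of the removed vendored file):
// draining all scaling activities with the MaxResults/NextToken protocol the
// documentation above describes: request a page, then repeat with the returned
// NextToken until it comes back nil.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/applicationautoscaling"
)

func main() {
	svc := applicationautoscaling.New(session.Must(session.NewSession()))

	input := &applicationautoscaling.DescribeScalingActivitiesInput{
		ServiceNamespace: aws.String("dynamodb"),
		MaxResults:       aws.Int64(10), // 1-50; defaults to 50 when omitted
	}
	for {
		page, err := svc.DescribeScalingActivities(input)
		if err != nil {
			log.Fatal(err)
		}
		for _, a := range page.ScalingActivities {
			fmt.Println(a)
		}
		if page.NextToken == nil {
			break // a nil NextToken marks the last page
		}
		input.NextToken = page.NextToken
	}
}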
-func (s *DescribeScalingActivitiesInput) SetServiceNamespace(v string) *DescribeScalingActivitiesInput { - s.ServiceNamespace = &v - return s -} - -type DescribeScalingActivitiesOutput struct { - _ struct{} `type:"structure"` - - // The token required to get the next set of results. This value is null if - // there are no more results to return. - NextToken *string `type:"string"` - - // A list of scaling activity objects. - ScalingActivities []*ScalingActivity `type:"list"` -} - -// String returns the string representation -func (s DescribeScalingActivitiesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeScalingActivitiesOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeScalingActivitiesOutput) SetNextToken(v string) *DescribeScalingActivitiesOutput { - s.NextToken = &v - return s -} - -// SetScalingActivities sets the ScalingActivities field's value. -func (s *DescribeScalingActivitiesOutput) SetScalingActivities(v []*ScalingActivity) *DescribeScalingActivitiesOutput { - s.ScalingActivities = v - return s -} - -type DescribeScalingPoliciesInput struct { - _ struct{} `type:"structure"` - - // The maximum number of scalable targets. This value can be between 1 and 50. - // The default value is 50. - // - // If this parameter is used, the operation returns up to MaxResults results - // at a time, along with a NextToken value. To get the next set of results, - // include the NextToken value in a subsequent call. If this parameter is not - // used, the operation returns up to 50 results and a NextToken value, if applicable. - MaxResults *int64 `type:"integer"` - - // The token for the next set of results. - NextToken *string `type:"string"` - - // The names of the scaling policies to describe. - PolicyNames []*string `type:"list"` - - // The identifier of the resource associated with the scaling policy. This string - // consists of the resource type and unique identifier. If you specify a scalable - // dimension, you must also specify a resource ID. - // - // * ECS service - The resource type is service and the unique identifier - // is the cluster name and service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - The resource type is instancegroup and the unique identifier - // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. - // - // * AppStream 2.0 fleet - The resource type is fleet and the unique identifier - // is the fleet name. Example: fleet/sample-fleet. - // - // * DynamoDB table - The resource type is table and the unique identifier - // is the table name. Example: table/my-table. - // - // * DynamoDB global secondary index - The resource type is index and the - // unique identifier is the index name. Example: table/my-table/index/my-table-index. - // - // * Aurora DB cluster - The resource type is cluster and the unique identifier - // is the cluster name. Example: cluster:my-db-cluster. - // - // * Amazon SageMaker endpoint variant - The resource type is variant and - // the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. - // - // * Custom resources are not supported with a resource type. 
- // This parameter
- // must specify the OutputValue from the CloudFormation template stack used
- // to access the resources. The unique identifier is defined by the service
- // provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource).
- //
- // * Amazon Comprehend document classification endpoint - The resource type
- // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
- //
- // * Lambda provisioned concurrency - The resource type is function and the
- // unique identifier is the function name with a function version or alias
- // name suffix that is not $LATEST. Example: function:my-function:prod or
- // function:my-function:1.
- ResourceId *string `min:"1" type:"string"`
-
- // The scalable dimension. This string consists of the service namespace, resource
- // type, and scaling property. If you specify a scalable dimension, you must
- // also specify a resource ID.
- //
- // * ecs:service:DesiredCount - The desired task count of an ECS service.
- //
- // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot
- // Fleet request.
- //
- // * elasticmapreduce:instancegroup:InstanceCount - The instance count of
- // an EMR Instance Group.
- //
- // * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream
- // 2.0 fleet.
- //
- // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for
- // a DynamoDB table.
- //
- // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for
- // a DynamoDB table.
- //
- // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for
- // a DynamoDB global secondary index.
- //
- // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for
- // a DynamoDB global secondary index.
- //
- // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora
- // DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible
- // edition.
- //
- // * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances
- // for an Amazon SageMaker model endpoint variant.
- //
- // * custom-resource:ResourceType:Property - The scalable dimension for a
- // custom resource provided by your own application or service.
- //
- // * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The
- // number of inference units for an Amazon Comprehend document classification
- // endpoint.
- //
- // * lambda:function:ProvisionedConcurrency - The provisioned concurrency
- // for a Lambda function.
- ScalableDimension *string `type:"string" enum:"ScalableDimension"`
-
- // The namespace of the AWS service that provides the resource or custom-resource
- // for a resource provided by your own application or service. For more information,
- // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces)
- // in the Amazon Web Services General Reference.
- // - // ServiceNamespace is a required field - ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` -} - -// String returns the string representation -func (s DescribeScalingPoliciesInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeScalingPoliciesInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeScalingPoliciesInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeScalingPoliciesInput"} - if s.ResourceId != nil && len(*s.ResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) - } - if s.ServiceNamespace == nil { - invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *DescribeScalingPoliciesInput) SetMaxResults(v int64) *DescribeScalingPoliciesInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeScalingPoliciesInput) SetNextToken(v string) *DescribeScalingPoliciesInput { - s.NextToken = &v - return s -} - -// SetPolicyNames sets the PolicyNames field's value. -func (s *DescribeScalingPoliciesInput) SetPolicyNames(v []*string) *DescribeScalingPoliciesInput { - s.PolicyNames = v - return s -} - -// SetResourceId sets the ResourceId field's value. -func (s *DescribeScalingPoliciesInput) SetResourceId(v string) *DescribeScalingPoliciesInput { - s.ResourceId = &v - return s -} - -// SetScalableDimension sets the ScalableDimension field's value. -func (s *DescribeScalingPoliciesInput) SetScalableDimension(v string) *DescribeScalingPoliciesInput { - s.ScalableDimension = &v - return s -} - -// SetServiceNamespace sets the ServiceNamespace field's value. -func (s *DescribeScalingPoliciesInput) SetServiceNamespace(v string) *DescribeScalingPoliciesInput { - s.ServiceNamespace = &v - return s -} - -type DescribeScalingPoliciesOutput struct { - _ struct{} `type:"structure"` - - // The token required to get the next set of results. This value is null if - // there are no more results to return. - NextToken *string `type:"string"` - - // Information about the scaling policies. - ScalingPolicies []*ScalingPolicy `type:"list"` -} - -// String returns the string representation -func (s DescribeScalingPoliciesOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeScalingPoliciesOutput) GoString() string { - return s.String() -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeScalingPoliciesOutput) SetNextToken(v string) *DescribeScalingPoliciesOutput { - s.NextToken = &v - return s -} - -// SetScalingPolicies sets the ScalingPolicies field's value. -func (s *DescribeScalingPoliciesOutput) SetScalingPolicies(v []*ScalingPolicy) *DescribeScalingPoliciesOutput { - s.ScalingPolicies = v - return s -} - -type DescribeScheduledActionsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of scheduled action results. This value can be between - // 1 and 50. The default value is 50. - // - // If this parameter is used, the operation returns up to MaxResults results - // at a time, along with a NextToken value. To get the next set of results, - // include the NextToken value in a subsequent call. 
- // If this parameter is not
- // used, the operation returns up to 50 results and a NextToken value, if applicable.
- MaxResults *int64 `type:"integer"`
-
- // The token for the next set of results.
- NextToken *string `type:"string"`
-
- // The identifier of the resource associated with the scheduled action. This
- // string consists of the resource type and unique identifier. If you specify
- // a scalable dimension, you must also specify a resource ID.
- //
- // * ECS service - The resource type is service and the unique identifier
- // is the cluster name and service name. Example: service/default/sample-webapp.
- //
- // * Spot Fleet request - The resource type is spot-fleet-request and the
- // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE.
- //
- // * EMR cluster - The resource type is instancegroup and the unique identifier
- // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
- //
- // * AppStream 2.0 fleet - The resource type is fleet and the unique identifier
- // is the fleet name. Example: fleet/sample-fleet.
- //
- // * DynamoDB table - The resource type is table and the unique identifier
- // is the table name. Example: table/my-table.
- //
- // * DynamoDB global secondary index - The resource type is index and the
- // unique identifier is the index name. Example: table/my-table/index/my-table-index.
- //
- // * Aurora DB cluster - The resource type is cluster and the unique identifier
- // is the cluster name. Example: cluster:my-db-cluster.
- //
- // * Amazon SageMaker endpoint variant - The resource type is variant and
- // the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.
- //
- // * Custom resources are not supported with a resource type. This parameter
- // must specify the OutputValue from the CloudFormation template stack used
- // to access the resources. The unique identifier is defined by the service
- // provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource).
- //
- // * Amazon Comprehend document classification endpoint - The resource type
- // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
- //
- // * Lambda provisioned concurrency - The resource type is function and the
- // unique identifier is the function name with a function version or alias
- // name suffix that is not $LATEST. Example: function:my-function:prod or
- // function:my-function:1.
- ResourceId *string `min:"1" type:"string"`
-
- // The scalable dimension. This string consists of the service namespace, resource
- // type, and scaling property. If you specify a scalable dimension, you must
- // also specify a resource ID.
- //
- // * ecs:service:DesiredCount - The desired task count of an ECS service.
- //
- // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot
- // Fleet request.
- //
- // * elasticmapreduce:instancegroup:InstanceCount - The instance count of
- // an EMR Instance Group.
- //
- // * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream
- // 2.0 fleet.
- //
- // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for
- // a DynamoDB table.
- //
- // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for
- // a DynamoDB table.
- // - // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB global secondary index. - // - // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB global secondary index. - // - // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora - // DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible - // edition. - // - // * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances - // for an Amazon SageMaker model endpoint variant. - // - // * custom-resource:ResourceType:Property - The scalable dimension for a - // custom resource provided by your own application or service. - // - // * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The - // number of inference units for an Amazon Comprehend document classification - // endpoint. - // - // * lambda:function:ProvisionedConcurrency - The provisioned concurrency - // for a Lambda function. - ScalableDimension *string `type:"string" enum:"ScalableDimension"` - - // The names of the scheduled actions to describe. - ScheduledActionNames []*string `type:"list"` - - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. - // - // ServiceNamespace is a required field - ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` -} - -// String returns the string representation -func (s DescribeScheduledActionsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeScheduledActionsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeScheduledActionsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeScheduledActionsInput"} - if s.ResourceId != nil && len(*s.ResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) - } - if s.ServiceNamespace == nil { - invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxResults sets the MaxResults field's value. -func (s *DescribeScheduledActionsInput) SetMaxResults(v int64) *DescribeScheduledActionsInput { - s.MaxResults = &v - return s -} - -// SetNextToken sets the NextToken field's value. -func (s *DescribeScheduledActionsInput) SetNextToken(v string) *DescribeScheduledActionsInput { - s.NextToken = &v - return s -} - -// SetResourceId sets the ResourceId field's value. -func (s *DescribeScheduledActionsInput) SetResourceId(v string) *DescribeScheduledActionsInput { - s.ResourceId = &v - return s -} - -// SetScalableDimension sets the ScalableDimension field's value. -func (s *DescribeScheduledActionsInput) SetScalableDimension(v string) *DescribeScheduledActionsInput { - s.ScalableDimension = &v - return s -} - -// SetScheduledActionNames sets the ScheduledActionNames field's value. -func (s *DescribeScheduledActionsInput) SetScheduledActionNames(v []*string) *DescribeScheduledActionsInput { - s.ScheduledActionNames = v - return s -} - -// SetServiceNamespace sets the ServiceNamespace field's value. 
-func (s *DescribeScheduledActionsInput) SetServiceNamespace(v string) *DescribeScheduledActionsInput {
- s.ServiceNamespace = &v
- return s
-}
-
-type DescribeScheduledActionsOutput struct {
- _ struct{} `type:"structure"`
-
- // The token required to get the next set of results. This value is null if
- // there are no more results to return.
- NextToken *string `type:"string"`
-
- // Information about the scheduled actions.
- ScheduledActions []*ScheduledAction `type:"list"`
-}
-
-// String returns the string representation
-func (s DescribeScheduledActionsOutput) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s DescribeScheduledActionsOutput) GoString() string {
- return s.String()
-}
-
-// SetNextToken sets the NextToken field's value.
-func (s *DescribeScheduledActionsOutput) SetNextToken(v string) *DescribeScheduledActionsOutput {
- s.NextToken = &v
- return s
-}
-
-// SetScheduledActions sets the ScheduledActions field's value.
-func (s *DescribeScheduledActionsOutput) SetScheduledActions(v []*ScheduledAction) *DescribeScheduledActionsOutput {
- s.ScheduledActions = v
- return s
-}
-
-// Describes the dimension names and values associated with a metric.
-type MetricDimension struct {
- _ struct{} `type:"structure"`
-
- // The name of the dimension.
- //
- // Name is a required field
- Name *string `type:"string" required:"true"`
-
- // The value of the dimension.
- //
- // Value is a required field
- Value *string `type:"string" required:"true"`
-}
-
-// String returns the string representation
-func (s MetricDimension) String() string {
- return awsutil.Prettify(s)
-}
-
-// GoString returns the string representation
-func (s MetricDimension) GoString() string {
- return s.String()
-}
-
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *MetricDimension) Validate() error {
- invalidParams := request.ErrInvalidParams{Context: "MetricDimension"}
- if s.Name == nil {
- invalidParams.Add(request.NewErrParamRequired("Name"))
- }
- if s.Value == nil {
- invalidParams.Add(request.NewErrParamRequired("Value"))
- }
-
- if invalidParams.Len() > 0 {
- return invalidParams
- }
- return nil
-}
-
-// SetName sets the Name field's value.
-func (s *MetricDimension) SetName(v string) *MetricDimension {
- s.Name = &v
- return s
-}
-
-// SetValue sets the Value field's value.
-func (s *MetricDimension) SetValue(v string) *MetricDimension {
- s.Value = &v
- return s
-}
-
-// Represents a predefined metric for a target tracking scaling policy to use
-// with Application Auto Scaling.
-type PredefinedMetricSpecification struct {
- _ struct{} `type:"structure"`
-
- // The metric type. The ALBRequestCountPerTarget metric type applies only to
- // Spot Fleet requests and ECS services.
- //
- // PredefinedMetricType is a required field
- PredefinedMetricType *string `type:"string" required:"true" enum:"MetricType"`
-
- // Identifies the resource associated with the metric type. You can't specify
- // a resource label unless the metric type is ALBRequestCountPerTarget and there
- // is a target group attached to the Spot Fleet request or ECS service.
- //
- // The format is app/<load-balancer-name>/<load-balancer-id>/targetgroup/<target-group-name>/<target-group-id>,
- // where:
- //
- // * app/<load-balancer-name>/<load-balancer-id> is the final portion of
- // the load balancer ARN
- //
- // * targetgroup/<target-group-name>/<target-group-id> is the final portion
- // of the target group ARN.
- ResourceLabel *string `min:"1" type:"string"` -} - -// String returns the string representation -func (s PredefinedMetricSpecification) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PredefinedMetricSpecification) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PredefinedMetricSpecification) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PredefinedMetricSpecification"} - if s.PredefinedMetricType == nil { - invalidParams.Add(request.NewErrParamRequired("PredefinedMetricType")) - } - if s.ResourceLabel != nil && len(*s.ResourceLabel) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceLabel", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPredefinedMetricType sets the PredefinedMetricType field's value. -func (s *PredefinedMetricSpecification) SetPredefinedMetricType(v string) *PredefinedMetricSpecification { - s.PredefinedMetricType = &v - return s -} - -// SetResourceLabel sets the ResourceLabel field's value. -func (s *PredefinedMetricSpecification) SetResourceLabel(v string) *PredefinedMetricSpecification { - s.ResourceLabel = &v - return s -} - -type PutScalingPolicyInput struct { - _ struct{} `type:"structure"` - - // The name of the scaling policy. - // - // PolicyName is a required field - PolicyName *string `min:"1" type:"string" required:"true"` - - // The policy type. This parameter is required if you are creating a scaling - // policy. - // - // The following policy types are supported: - // - // TargetTrackingScaling—Not supported for Amazon EMR - // - // StepScaling—Not supported for DynamoDB, Amazon Comprehend, or AWS Lambda - // - // For more information, see Target Tracking Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-target-tracking.html) - // and Step Scaling Policies (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-step-scaling-policies.html) - // in the Application Auto Scaling User Guide. - PolicyType *string `type:"string" enum:"PolicyType"` - - // The identifier of the resource associated with the scaling policy. This string - // consists of the resource type and unique identifier. - // - // * ECS service - The resource type is service and the unique identifier - // is the cluster name and service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - The resource type is instancegroup and the unique identifier - // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. - // - // * AppStream 2.0 fleet - The resource type is fleet and the unique identifier - // is the fleet name. Example: fleet/sample-fleet. - // - // * DynamoDB table - The resource type is table and the unique identifier - // is the table name. Example: table/my-table. - // - // * DynamoDB global secondary index - The resource type is index and the - // unique identifier is the index name. Example: table/my-table/index/my-table-index. - // - // * Aurora DB cluster - The resource type is cluster and the unique identifier - // is the cluster name. Example: cluster:my-db-cluster. 
- // - // * Amazon SageMaker endpoint variant - The resource type is variant and - // the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. - // - // * Custom resources are not supported with a resource type. This parameter - // must specify the OutputValue from the CloudFormation template stack used - // to access the resources. The unique identifier is defined by the service - // provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). - // - // * Amazon Comprehend document classification endpoint - The resource type - // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. - // - // * Lambda provisioned concurrency - The resource type is function and the - // unique identifier is the function name with a function version or alias - // name suffix that is not $LATEST. Example: function:my-function:prod or - // function:my-function:1. - // - // ResourceId is a required field - ResourceId *string `min:"1" type:"string" required:"true"` - - // The scalable dimension. This string consists of the service namespace, resource - // type, and scaling property. - // - // * ecs:service:DesiredCount - The desired task count of an ECS service. - // - // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // Fleet request. - // - // * elasticmapreduce:instancegroup:InstanceCount - The instance count of - // an EMR Instance Group. - // - // * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream - // 2.0 fleet. - // - // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB table. - // - // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB table. - // - // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB global secondary index. - // - // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB global secondary index. - // - // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora - // DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible - // edition. - // - // * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances - // for an Amazon SageMaker model endpoint variant. - // - // * custom-resource:ResourceType:Property - The scalable dimension for a - // custom resource provided by your own application or service. - // - // * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The - // number of inference units for an Amazon Comprehend document classification - // endpoint. - // - // * lambda:function:ProvisionedConcurrency - The provisioned concurrency - // for a Lambda function. - // - // ScalableDimension is a required field - ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` - - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. - // - // ServiceNamespace is a required field - ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` - - // A step scaling policy. 
- // - // This parameter is required if you are creating a policy and the policy type - // is StepScaling. - StepScalingPolicyConfiguration *StepScalingPolicyConfiguration `type:"structure"` - - // A target tracking scaling policy. Includes support for predefined or customized - // metrics. - // - // This parameter is required if you are creating a policy and the policy type - // is TargetTrackingScaling. - TargetTrackingScalingPolicyConfiguration *TargetTrackingScalingPolicyConfiguration `type:"structure"` -} - -// String returns the string representation -func (s PutScalingPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutScalingPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutScalingPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutScalingPolicyInput"} - if s.PolicyName == nil { - invalidParams.Add(request.NewErrParamRequired("PolicyName")) - } - if s.PolicyName != nil && len(*s.PolicyName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("PolicyName", 1)) - } - if s.ResourceId == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceId")) - } - if s.ResourceId != nil && len(*s.ResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) - } - if s.ScalableDimension == nil { - invalidParams.Add(request.NewErrParamRequired("ScalableDimension")) - } - if s.ServiceNamespace == nil { - invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) - } - if s.StepScalingPolicyConfiguration != nil { - if err := s.StepScalingPolicyConfiguration.Validate(); err != nil { - invalidParams.AddNested("StepScalingPolicyConfiguration", err.(request.ErrInvalidParams)) - } - } - if s.TargetTrackingScalingPolicyConfiguration != nil { - if err := s.TargetTrackingScalingPolicyConfiguration.Validate(); err != nil { - invalidParams.AddNested("TargetTrackingScalingPolicyConfiguration", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetPolicyName sets the PolicyName field's value. -func (s *PutScalingPolicyInput) SetPolicyName(v string) *PutScalingPolicyInput { - s.PolicyName = &v - return s -} - -// SetPolicyType sets the PolicyType field's value. -func (s *PutScalingPolicyInput) SetPolicyType(v string) *PutScalingPolicyInput { - s.PolicyType = &v - return s -} - -// SetResourceId sets the ResourceId field's value. -func (s *PutScalingPolicyInput) SetResourceId(v string) *PutScalingPolicyInput { - s.ResourceId = &v - return s -} - -// SetScalableDimension sets the ScalableDimension field's value. -func (s *PutScalingPolicyInput) SetScalableDimension(v string) *PutScalingPolicyInput { - s.ScalableDimension = &v - return s -} - -// SetServiceNamespace sets the ServiceNamespace field's value. -func (s *PutScalingPolicyInput) SetServiceNamespace(v string) *PutScalingPolicyInput { - s.ServiceNamespace = &v - return s -} - -// SetStepScalingPolicyConfiguration sets the StepScalingPolicyConfiguration field's value. -func (s *PutScalingPolicyInput) SetStepScalingPolicyConfiguration(v *StepScalingPolicyConfiguration) *PutScalingPolicyInput { - s.StepScalingPolicyConfiguration = v - return s -} - -// SetTargetTrackingScalingPolicyConfiguration sets the TargetTrackingScalingPolicyConfiguration field's value. 
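// Illustrative sketch, not part of the removed vendored file: how the
// PutScalingPolicyInput defined above was commonly assembled for a
// TargetTrackingScaling policy on a DynamoDB table. The table name, target
// value, and session setup are assumptions for the example.
//
//	import (
//		"github.com/aws/aws-sdk-go/aws"
//		"github.com/aws/aws-sdk-go/aws/session"
//		"github.com/aws/aws-sdk-go/service/applicationautoscaling"
//	)
//
//	func putPolicy() error {
//		svc := applicationautoscaling.New(session.Must(session.NewSession()))
//		input := &applicationautoscaling.PutScalingPolicyInput{
//			PolicyName:        aws.String("my-table-read-scaling"),
//			PolicyType:        aws.String("TargetTrackingScaling"),
//			ResourceId:        aws.String("table/my-table"),
//			ScalableDimension: aws.String("dynamodb:table:ReadCapacityUnits"),
//			ServiceNamespace:  aws.String("dynamodb"),
//			TargetTrackingScalingPolicyConfiguration: &applicationautoscaling.TargetTrackingScalingPolicyConfiguration{
//				TargetValue: aws.Float64(70.0),
//				PredefinedMetricSpecification: &applicationautoscaling.PredefinedMetricSpecification{
//					PredefinedMetricType: aws.String("DynamoDBReadCapacityUtilization"),
//				},
//			},
//		}
//		_, err := svc.PutScalingPolicy(input)
//		return err
//	}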
-func (s *PutScalingPolicyInput) SetTargetTrackingScalingPolicyConfiguration(v *TargetTrackingScalingPolicyConfiguration) *PutScalingPolicyInput { - s.TargetTrackingScalingPolicyConfiguration = v - return s -} - -type PutScalingPolicyOutput struct { - _ struct{} `type:"structure"` - - // The CloudWatch alarms created for the target tracking scaling policy. - Alarms []*Alarm `type:"list"` - - // The Amazon Resource Name (ARN) of the resulting scaling policy. - // - // PolicyARN is a required field - PolicyARN *string `min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s PutScalingPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutScalingPolicyOutput) GoString() string { - return s.String() -} - -// SetAlarms sets the Alarms field's value. -func (s *PutScalingPolicyOutput) SetAlarms(v []*Alarm) *PutScalingPolicyOutput { - s.Alarms = v - return s -} - -// SetPolicyARN sets the PolicyARN field's value. -func (s *PutScalingPolicyOutput) SetPolicyARN(v string) *PutScalingPolicyOutput { - s.PolicyARN = &v - return s -} - -type PutScheduledActionInput struct { - _ struct{} `type:"structure"` - - // The date and time for the scheduled action to end. - EndTime *time.Time `type:"timestamp"` - - // The identifier of the resource associated with the scheduled action. This - // string consists of the resource type and unique identifier. - // - // * ECS service - The resource type is service and the unique identifier - // is the cluster name and service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - The resource type is instancegroup and the unique identifier - // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. - // - // * AppStream 2.0 fleet - The resource type is fleet and the unique identifier - // is the fleet name. Example: fleet/sample-fleet. - // - // * DynamoDB table - The resource type is table and the unique identifier - // is the table name. Example: table/my-table. - // - // * DynamoDB global secondary index - The resource type is index and the - // unique identifier is the index name. Example: table/my-table/index/my-table-index. - // - // * Aurora DB cluster - The resource type is cluster and the unique identifier - // is the cluster name. Example: cluster:my-db-cluster. - // - // * Amazon SageMaker endpoint variant - The resource type is variant and - // the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. - // - // * Custom resources are not supported with a resource type. This parameter - // must specify the OutputValue from the CloudFormation template stack used - // to access the resources. The unique identifier is defined by the service - // provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). - // - // * Amazon Comprehend document classification endpoint - The resource type - // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. 
- // - // * Lambda provisioned concurrency - The resource type is function and the - // unique identifier is the function name with a function version or alias - // name suffix that is not $LATEST. Example: function:my-function:prod or - // function:my-function:1. - // - // ResourceId is a required field - ResourceId *string `min:"1" type:"string" required:"true"` - - // The scalable dimension. This string consists of the service namespace, resource - // type, and scaling property. - // - // * ecs:service:DesiredCount - The desired task count of an ECS service. - // - // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // Fleet request. - // - // * elasticmapreduce:instancegroup:InstanceCount - The instance count of - // an EMR Instance Group. - // - // * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream - // 2.0 fleet. - // - // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB table. - // - // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB table. - // - // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB global secondary index. - // - // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB global secondary index. - // - // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora - // DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible - // edition. - // - // * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances - // for an Amazon SageMaker model endpoint variant. - // - // * custom-resource:ResourceType:Property - The scalable dimension for a - // custom resource provided by your own application or service. - // - // * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The - // number of inference units for an Amazon Comprehend document classification - // endpoint. - // - // * lambda:function:ProvisionedConcurrency - The provisioned concurrency - // for a Lambda function. - // - // ScalableDimension is a required field - ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` - - // The new minimum and maximum capacity. You can set both values or just one. - // During the scheduled time, if the current capacity is below the minimum capacity, - // Application Auto Scaling scales out to the minimum capacity. If the current - // capacity is above the maximum capacity, Application Auto Scaling scales in - // to the maximum capacity. - ScalableTargetAction *ScalableTargetAction `type:"structure"` - - // The schedule for this action. The following formats are supported: - // - // * At expressions - "at(yyyy-mm-ddThh:mm:ss)" - // - // * Rate expressions - "rate(value unit)" - // - // * Cron expressions - "cron(fields)" - // - // At expressions are useful for one-time schedules. Specify the time, in UTC. - // - // For rate expressions, value is a positive integer and unit is minute | minutes - // | hour | hours | day | days. - // - // For more information about cron expressions, see Cron Expressions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) - // in the Amazon CloudWatch Events User Guide. - Schedule *string `min:"1" type:"string"` - - // The name of the scheduled action. 
- // - // ScheduledActionName is a required field - ScheduledActionName *string `min:"1" type:"string" required:"true"` - - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. - // - // ServiceNamespace is a required field - ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` - - // The date and time for the scheduled action to start. - StartTime *time.Time `type:"timestamp"` -} - -// String returns the string representation -func (s PutScheduledActionInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutScheduledActionInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutScheduledActionInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutScheduledActionInput"} - if s.ResourceId == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceId")) - } - if s.ResourceId != nil && len(*s.ResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) - } - if s.ScalableDimension == nil { - invalidParams.Add(request.NewErrParamRequired("ScalableDimension")) - } - if s.Schedule != nil && len(*s.Schedule) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Schedule", 1)) - } - if s.ScheduledActionName == nil { - invalidParams.Add(request.NewErrParamRequired("ScheduledActionName")) - } - if s.ScheduledActionName != nil && len(*s.ScheduledActionName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ScheduledActionName", 1)) - } - if s.ServiceNamespace == nil { - invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetEndTime sets the EndTime field's value. -func (s *PutScheduledActionInput) SetEndTime(v time.Time) *PutScheduledActionInput { - s.EndTime = &v - return s -} - -// SetResourceId sets the ResourceId field's value. -func (s *PutScheduledActionInput) SetResourceId(v string) *PutScheduledActionInput { - s.ResourceId = &v - return s -} - -// SetScalableDimension sets the ScalableDimension field's value. -func (s *PutScheduledActionInput) SetScalableDimension(v string) *PutScheduledActionInput { - s.ScalableDimension = &v - return s -} - -// SetScalableTargetAction sets the ScalableTargetAction field's value. -func (s *PutScheduledActionInput) SetScalableTargetAction(v *ScalableTargetAction) *PutScheduledActionInput { - s.ScalableTargetAction = v - return s -} - -// SetSchedule sets the Schedule field's value. -func (s *PutScheduledActionInput) SetSchedule(v string) *PutScheduledActionInput { - s.Schedule = &v - return s -} - -// SetScheduledActionName sets the ScheduledActionName field's value. -func (s *PutScheduledActionInput) SetScheduledActionName(v string) *PutScheduledActionInput { - s.ScheduledActionName = &v - return s -} - -// SetServiceNamespace sets the ServiceNamespace field's value. -func (s *PutScheduledActionInput) SetServiceNamespace(v string) *PutScheduledActionInput { - s.ServiceNamespace = &v - return s -} - -// SetStartTime sets the StartTime field's value. 
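// Illustrative sketch, not part of the removed vendored file: a one-time
// at() schedule combined with a ScalableTargetAction, as documented on
// PutScheduledActionInput above. The action name, resource, capacity, and
// timestamp are invented; `svc` is assumed to be an
// *applicationautoscaling.ApplicationAutoScaling client.
//
//	input := &applicationautoscaling.PutScheduledActionInput{
//		ScheduledActionName: aws.String("scale-up-before-batch-run"),
//		Schedule:            aws.String("at(2020-05-01T08:00:00)"), // one-time, UTC
//		ResourceId:          aws.String("table/my-table"),
//		ScalableDimension:   aws.String("dynamodb:table:WriteCapacityUnits"),
//		ServiceNamespace:    aws.String("dynamodb"),
//		ScalableTargetAction: &applicationautoscaling.ScalableTargetAction{
//			MinCapacity: aws.Int64(100), // scale out to at least 100 WCU
//		},
//	}
//	_, err := svc.PutScheduledAction(input)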
-func (s *PutScheduledActionInput) SetStartTime(v time.Time) *PutScheduledActionInput { - s.StartTime = &v - return s -} - -type PutScheduledActionOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutScheduledActionOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutScheduledActionOutput) GoString() string { - return s.String() -} - -type RegisterScalableTargetInput struct { - _ struct{} `type:"structure"` - - // The maximum value to scale to in response to a scale-out event. MaxCapacity - // is required to register a scalable target. - MaxCapacity *int64 `type:"integer"` - - // The minimum value to scale to in response to a scale-in event. MinCapacity - // is required to register a scalable target. - MinCapacity *int64 `type:"integer"` - - // The identifier of the resource that is associated with the scalable target. - // This string consists of the resource type and unique identifier. - // - // * ECS service - The resource type is service and the unique identifier - // is the cluster name and service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - The resource type is instancegroup and the unique identifier - // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. - // - // * AppStream 2.0 fleet - The resource type is fleet and the unique identifier - // is the fleet name. Example: fleet/sample-fleet. - // - // * DynamoDB table - The resource type is table and the unique identifier - // is the table name. Example: table/my-table. - // - // * DynamoDB global secondary index - The resource type is index and the - // unique identifier is the index name. Example: table/my-table/index/my-table-index. - // - // * Aurora DB cluster - The resource type is cluster and the unique identifier - // is the cluster name. Example: cluster:my-db-cluster. - // - // * Amazon SageMaker endpoint variant - The resource type is variant and - // the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. - // - // * Custom resources are not supported with a resource type. This parameter - // must specify the OutputValue from the CloudFormation template stack used - // to access the resources. The unique identifier is defined by the service - // provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). - // - // * Amazon Comprehend document classification endpoint - The resource type - // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. - // - // * Lambda provisioned concurrency - The resource type is function and the - // unique identifier is the function name with a function version or alias - // name suffix that is not $LATEST. Example: function:my-function:prod or - // function:my-function:1. - // - // ResourceId is a required field - ResourceId *string `min:"1" type:"string" required:"true"` - - // Application Auto Scaling creates a service-linked role that grants it permissions - // to modify the scalable target on your behalf. 
- // For more information, see Service-Linked
- // Roles for Application Auto Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-service-linked-roles.html).
- //
- // For Amazon EMR, this parameter is required, and it must specify the ARN of
- // an IAM role that allows Application Auto Scaling to modify the scalable target
- // on your behalf.
- RoleARN *string `min:"1" type:"string"`
-
- // The scalable dimension associated with the scalable target. This string consists
- // of the service namespace, resource type, and scaling property.
- //
- // * ecs:service:DesiredCount - The desired task count of an ECS service.
- //
- // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot
- // Fleet request.
- //
- // * elasticmapreduce:instancegroup:InstanceCount - The instance count of
- // an EMR Instance Group.
- //
- // * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream
- // 2.0 fleet.
- //
- // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for
- // a DynamoDB table.
- //
- // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for
- // a DynamoDB table.
- //
- // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for
- // a DynamoDB global secondary index.
- //
- // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for
- // a DynamoDB global secondary index.
- //
- // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora
- // DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible
- // edition.
- //
- // * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances
- // for an Amazon SageMaker model endpoint variant.
- //
- // * custom-resource:ResourceType:Property - The scalable dimension for a
- // custom resource provided by your own application or service.
- //
- // * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The
- // number of inference units for an Amazon Comprehend document classification
- // endpoint.
- //
- // * lambda:function:ProvisionedConcurrency - The provisioned concurrency
- // for a Lambda function.
- //
- // ScalableDimension is a required field
- ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"`
-
- // The namespace of the AWS service that provides the resource or custom-resource
- // for a resource provided by your own application or service. For more information,
- // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces)
- // in the Amazon Web Services General Reference.
- //
- // ServiceNamespace is a required field
- ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"`
-
- // An embedded object that contains attributes and attribute values that are
- // used to suspend and resume automatic scaling. Setting the value of an attribute
- // to true suspends the specified scaling activities. Setting it to false (default)
- // resumes the specified scaling activities.
- //
- // Suspension Outcomes
- //
- // * For DynamicScalingInSuspended, while a suspension is in effect, all
- // scale-in activities that are triggered by a scaling policy are suspended.
- //
- // * For DynamicScalingOutSuspended, while a suspension is in effect, all
- // scale-out activities that are triggered by a scaling policy are suspended.
- // - // * For ScheduledScalingSuspended, while a suspension is in effect, all - // scaling activities that involve scheduled actions are suspended. - // - // For more information, see Suspending and Resuming Scaling (https://docs.aws.amazon.com/autoscaling/application/userguide/application-auto-scaling-suspend-resume-scaling.html) - // in the Application Auto Scaling User Guide. - SuspendedState *SuspendedState `type:"structure"` -} - -// String returns the string representation -func (s RegisterScalableTargetInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RegisterScalableTargetInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *RegisterScalableTargetInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "RegisterScalableTargetInput"} - if s.ResourceId == nil { - invalidParams.Add(request.NewErrParamRequired("ResourceId")) - } - if s.ResourceId != nil && len(*s.ResourceId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("ResourceId", 1)) - } - if s.RoleARN != nil && len(*s.RoleARN) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RoleARN", 1)) - } - if s.ScalableDimension == nil { - invalidParams.Add(request.NewErrParamRequired("ScalableDimension")) - } - if s.ServiceNamespace == nil { - invalidParams.Add(request.NewErrParamRequired("ServiceNamespace")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMaxCapacity sets the MaxCapacity field's value. -func (s *RegisterScalableTargetInput) SetMaxCapacity(v int64) *RegisterScalableTargetInput { - s.MaxCapacity = &v - return s -} - -// SetMinCapacity sets the MinCapacity field's value. -func (s *RegisterScalableTargetInput) SetMinCapacity(v int64) *RegisterScalableTargetInput { - s.MinCapacity = &v - return s -} - -// SetResourceId sets the ResourceId field's value. -func (s *RegisterScalableTargetInput) SetResourceId(v string) *RegisterScalableTargetInput { - s.ResourceId = &v - return s -} - -// SetRoleARN sets the RoleARN field's value. -func (s *RegisterScalableTargetInput) SetRoleARN(v string) *RegisterScalableTargetInput { - s.RoleARN = &v - return s -} - -// SetScalableDimension sets the ScalableDimension field's value. -func (s *RegisterScalableTargetInput) SetScalableDimension(v string) *RegisterScalableTargetInput { - s.ScalableDimension = &v - return s -} - -// SetServiceNamespace sets the ServiceNamespace field's value. -func (s *RegisterScalableTargetInput) SetServiceNamespace(v string) *RegisterScalableTargetInput { - s.ServiceNamespace = &v - return s -} - -// SetSuspendedState sets the SuspendedState field's value. -func (s *RegisterScalableTargetInput) SetSuspendedState(v *SuspendedState) *RegisterScalableTargetInput { - s.SuspendedState = v - return s -} - -type RegisterScalableTargetOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s RegisterScalableTargetOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RegisterScalableTargetOutput) GoString() string { - return s.String() -} - -// Represents a scalable target. -type ScalableTarget struct { - _ struct{} `type:"structure"` - - // The Unix timestamp for when the scalable target was created. 
- // - // CreationTime is a required field - CreationTime *time.Time `type:"timestamp" required:"true"` - - // The maximum value to scale to in response to a scale-out event. - // - // MaxCapacity is a required field - MaxCapacity *int64 `type:"integer" required:"true"` - - // The minimum value to scale to in response to a scale-in event. - // - // MinCapacity is a required field - MinCapacity *int64 `type:"integer" required:"true"` - - // The identifier of the resource associated with the scalable target. This - // string consists of the resource type and unique identifier. - // - // * ECS service - The resource type is service and the unique identifier - // is the cluster name and service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - The resource type is instancegroup and the unique identifier - // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. - // - // * AppStream 2.0 fleet - The resource type is fleet and the unique identifier - // is the fleet name. Example: fleet/sample-fleet. - // - // * DynamoDB table - The resource type is table and the unique identifier - // is the table name. Example: table/my-table. - // - // * DynamoDB global secondary index - The resource type is index and the - // unique identifier is the index name. Example: table/my-table/index/my-table-index. - // - // * Aurora DB cluster - The resource type is cluster and the unique identifier - // is the cluster name. Example: cluster:my-db-cluster. - // - // * Amazon SageMaker endpoint variant - The resource type is variant and - // the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. - // - // * Custom resources are not supported with a resource type. This parameter - // must specify the OutputValue from the CloudFormation template stack used - // to access the resources. The unique identifier is defined by the service - // provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). - // - // * Amazon Comprehend document classification endpoint - The resource type - // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. - // - // * Lambda provisioned concurrency - The resource type is function and the - // unique identifier is the function name with a function version or alias - // name suffix that is not $LATEST. Example: function:my-function:prod or - // function:my-function:1. - // - // ResourceId is a required field - ResourceId *string `min:"1" type:"string" required:"true"` - - // The ARN of an IAM role that allows Application Auto Scaling to modify the - // scalable target on your behalf. - // - // RoleARN is a required field - RoleARN *string `min:"1" type:"string" required:"true"` - - // The scalable dimension associated with the scalable target. This string consists - // of the service namespace, resource type, and scaling property. - // - // * ecs:service:DesiredCount - The desired task count of an ECS service. - // - // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // Fleet request. - // - // * elasticmapreduce:instancegroup:InstanceCount - The instance count of - // an EMR Instance Group. 
- // - // * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream - // 2.0 fleet. - // - // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB table. - // - // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB table. - // - // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB global secondary index. - // - // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB global secondary index. - // - // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora - // DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible - // edition. - // - // * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances - // for an Amazon SageMaker model endpoint variant. - // - // * custom-resource:ResourceType:Property - The scalable dimension for a - // custom resource provided by your own application or service. - // - // * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The - // number of inference units for an Amazon Comprehend document classification - // endpoint. - // - // * lambda:function:ProvisionedConcurrency - The provisioned concurrency - // for a Lambda function. - // - // ScalableDimension is a required field - ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` - - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. - // - // ServiceNamespace is a required field - ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` - - // Specifies whether the scaling activities for a scalable target are in a suspended - // state. - SuspendedState *SuspendedState `type:"structure"` -} - -// String returns the string representation -func (s ScalableTarget) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ScalableTarget) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *ScalableTarget) SetCreationTime(v time.Time) *ScalableTarget { - s.CreationTime = &v - return s -} - -// SetMaxCapacity sets the MaxCapacity field's value. -func (s *ScalableTarget) SetMaxCapacity(v int64) *ScalableTarget { - s.MaxCapacity = &v - return s -} - -// SetMinCapacity sets the MinCapacity field's value. -func (s *ScalableTarget) SetMinCapacity(v int64) *ScalableTarget { - s.MinCapacity = &v - return s -} - -// SetResourceId sets the ResourceId field's value. -func (s *ScalableTarget) SetResourceId(v string) *ScalableTarget { - s.ResourceId = &v - return s -} - -// SetRoleARN sets the RoleARN field's value. -func (s *ScalableTarget) SetRoleARN(v string) *ScalableTarget { - s.RoleARN = &v - return s -} - -// SetScalableDimension sets the ScalableDimension field's value. -func (s *ScalableTarget) SetScalableDimension(v string) *ScalableTarget { - s.ScalableDimension = &v - return s -} - -// SetServiceNamespace sets the ServiceNamespace field's value. -func (s *ScalableTarget) SetServiceNamespace(v string) *ScalableTarget { - s.ServiceNamespace = &v - return s -} - -// SetSuspendedState sets the SuspendedState field's value. 
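// Illustrative sketch, not part of the removed vendored file: the
// SuspendedState semantics described above, expressed by re-registering a
// scalable target with dynamic scaling paused while scheduled actions keep
// running. The resource details and the `svc` client are assumptions.
//
//	input := &applicationautoscaling.RegisterScalableTargetInput{
//		ResourceId:        aws.String("service/default/sample-webapp"),
//		ScalableDimension: aws.String("ecs:service:DesiredCount"),
//		ServiceNamespace:  aws.String("ecs"),
//		SuspendedState: &applicationautoscaling.SuspendedState{
//			DynamicScalingInSuspended:  aws.Bool(true),
//			DynamicScalingOutSuspended: aws.Bool(true),
//			ScheduledScalingSuspended:  aws.Bool(false),
//		},
//	}
//	_, err := svc.RegisterScalableTarget(input)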
-func (s *ScalableTarget) SetSuspendedState(v *SuspendedState) *ScalableTarget { - s.SuspendedState = v - return s -} - -// Represents the minimum and maximum capacity for a scheduled action. -type ScalableTargetAction struct { - _ struct{} `type:"structure"` - - // The maximum capacity. - MaxCapacity *int64 `type:"integer"` - - // The minimum capacity. - MinCapacity *int64 `type:"integer"` -} - -// String returns the string representation -func (s ScalableTargetAction) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ScalableTargetAction) GoString() string { - return s.String() -} - -// SetMaxCapacity sets the MaxCapacity field's value. -func (s *ScalableTargetAction) SetMaxCapacity(v int64) *ScalableTargetAction { - s.MaxCapacity = &v - return s -} - -// SetMinCapacity sets the MinCapacity field's value. -func (s *ScalableTargetAction) SetMinCapacity(v int64) *ScalableTargetAction { - s.MinCapacity = &v - return s -} - -// Represents a scaling activity. -type ScalingActivity struct { - _ struct{} `type:"structure"` - - // The unique identifier of the scaling activity. - // - // ActivityId is a required field - ActivityId *string `type:"string" required:"true"` - - // A simple description of what caused the scaling activity to happen. - // - // Cause is a required field - Cause *string `type:"string" required:"true"` - - // A simple description of what action the scaling activity intends to accomplish. - // - // Description is a required field - Description *string `type:"string" required:"true"` - - // The details about the scaling activity. - Details *string `type:"string"` - - // The Unix timestamp for when the scaling activity ended. - EndTime *time.Time `type:"timestamp"` - - // The identifier of the resource associated with the scaling activity. This - // string consists of the resource type and unique identifier. - // - // * ECS service - The resource type is service and the unique identifier - // is the cluster name and service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - The resource type is instancegroup and the unique identifier - // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. - // - // * AppStream 2.0 fleet - The resource type is fleet and the unique identifier - // is the fleet name. Example: fleet/sample-fleet. - // - // * DynamoDB table - The resource type is table and the unique identifier - // is the table name. Example: table/my-table. - // - // * DynamoDB global secondary index - The resource type is index and the - // unique identifier is the index name. Example: table/my-table/index/my-table-index. - // - // * Aurora DB cluster - The resource type is cluster and the unique identifier - // is the cluster name. Example: cluster:my-db-cluster. - // - // * Amazon SageMaker endpoint variant - The resource type is variant and - // the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. - // - // * Custom resources are not supported with a resource type. This parameter - // must specify the OutputValue from the CloudFormation template stack used - // to access the resources. The unique identifier is defined by the service - // provider. 
- // More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource).
- //
- // * Amazon Comprehend document classification endpoint - The resource type
- // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
- //
- // * Lambda provisioned concurrency - The resource type is function and the
- // unique identifier is the function name with a function version or alias
- // name suffix that is not $LATEST. Example: function:my-function:prod or
- // function:my-function:1.
- //
- // ResourceId is a required field
- ResourceId *string `min:"1" type:"string" required:"true"`
-
- // The scalable dimension. This string consists of the service namespace, resource
- // type, and scaling property.
- //
- // * ecs:service:DesiredCount - The desired task count of an ECS service.
- //
- // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot
- // Fleet request.
- //
- // * elasticmapreduce:instancegroup:InstanceCount - The instance count of
- // an EMR Instance Group.
- //
- // * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream
- // 2.0 fleet.
- //
- // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for
- // a DynamoDB table.
- //
- // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for
- // a DynamoDB table.
- //
- // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for
- // a DynamoDB global secondary index.
- //
- // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for
- // a DynamoDB global secondary index.
- //
- // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora
- // DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible
- // edition.
- //
- // * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances
- // for an Amazon SageMaker model endpoint variant.
- //
- // * custom-resource:ResourceType:Property - The scalable dimension for a
- // custom resource provided by your own application or service.
- //
- // * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The
- // number of inference units for an Amazon Comprehend document classification
- // endpoint.
- //
- // * lambda:function:ProvisionedConcurrency - The provisioned concurrency
- // for a Lambda function.
- //
- // ScalableDimension is a required field
- ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"`
-
- // The namespace of the AWS service that provides the resource or custom-resource
- // for a resource provided by your own application or service. For more information,
- // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces)
- // in the Amazon Web Services General Reference.
- //
- // ServiceNamespace is a required field
- ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"`
-
- // The Unix timestamp for when the scaling activity began.
- //
- // StartTime is a required field
- StartTime *time.Time `type:"timestamp" required:"true"`
-
- // Indicates the status of the scaling activity.
- //
- // StatusCode is a required field
- StatusCode *string `type:"string" required:"true" enum:"ScalingActivityStatusCode"`
-
- // A simple message about the current status of the scaling activity.
- StatusMessage *string `type:"string"` -} - -// String returns the string representation -func (s ScalingActivity) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ScalingActivity) GoString() string { - return s.String() -} - -// SetActivityId sets the ActivityId field's value. -func (s *ScalingActivity) SetActivityId(v string) *ScalingActivity { - s.ActivityId = &v - return s -} - -// SetCause sets the Cause field's value. -func (s *ScalingActivity) SetCause(v string) *ScalingActivity { - s.Cause = &v - return s -} - -// SetDescription sets the Description field's value. -func (s *ScalingActivity) SetDescription(v string) *ScalingActivity { - s.Description = &v - return s -} - -// SetDetails sets the Details field's value. -func (s *ScalingActivity) SetDetails(v string) *ScalingActivity { - s.Details = &v - return s -} - -// SetEndTime sets the EndTime field's value. -func (s *ScalingActivity) SetEndTime(v time.Time) *ScalingActivity { - s.EndTime = &v - return s -} - -// SetResourceId sets the ResourceId field's value. -func (s *ScalingActivity) SetResourceId(v string) *ScalingActivity { - s.ResourceId = &v - return s -} - -// SetScalableDimension sets the ScalableDimension field's value. -func (s *ScalingActivity) SetScalableDimension(v string) *ScalingActivity { - s.ScalableDimension = &v - return s -} - -// SetServiceNamespace sets the ServiceNamespace field's value. -func (s *ScalingActivity) SetServiceNamespace(v string) *ScalingActivity { - s.ServiceNamespace = &v - return s -} - -// SetStartTime sets the StartTime field's value. -func (s *ScalingActivity) SetStartTime(v time.Time) *ScalingActivity { - s.StartTime = &v - return s -} - -// SetStatusCode sets the StatusCode field's value. -func (s *ScalingActivity) SetStatusCode(v string) *ScalingActivity { - s.StatusCode = &v - return s -} - -// SetStatusMessage sets the StatusMessage field's value. -func (s *ScalingActivity) SetStatusMessage(v string) *ScalingActivity { - s.StatusMessage = &v - return s -} - -// Represents a scaling policy to use with Application Auto Scaling. -type ScalingPolicy struct { - _ struct{} `type:"structure"` - - // The CloudWatch alarms associated with the scaling policy. - Alarms []*Alarm `type:"list"` - - // The Unix timestamp for when the scaling policy was created. - // - // CreationTime is a required field - CreationTime *time.Time `type:"timestamp" required:"true"` - - // The Amazon Resource Name (ARN) of the scaling policy. - // - // PolicyARN is a required field - PolicyARN *string `min:"1" type:"string" required:"true"` - - // The name of the scaling policy. - // - // PolicyName is a required field - PolicyName *string `min:"1" type:"string" required:"true"` - - // The scaling policy type. - // - // PolicyType is a required field - PolicyType *string `type:"string" required:"true" enum:"PolicyType"` - - // The identifier of the resource associated with the scaling policy. This string - // consists of the resource type and unique identifier. - // - // * ECS service - The resource type is service and the unique identifier - // is the cluster name and service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - The resource type is instancegroup and the unique identifier - // is the cluster ID and instance group ID. 
- // Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0.
- //
- // * AppStream 2.0 fleet - The resource type is fleet and the unique identifier
- // is the fleet name. Example: fleet/sample-fleet.
- //
- // * DynamoDB table - The resource type is table and the unique identifier
- // is the table name. Example: table/my-table.
- //
- // * DynamoDB global secondary index - The resource type is index and the
- // unique identifier is the index name. Example: table/my-table/index/my-table-index.
- //
- // * Aurora DB cluster - The resource type is cluster and the unique identifier
- // is the cluster name. Example: cluster:my-db-cluster.
- //
- // * Amazon SageMaker endpoint variant - The resource type is variant and
- // the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering.
- //
- // * Custom resources are not supported with a resource type. This parameter
- // must specify the OutputValue from the CloudFormation template stack used
- // to access the resources. The unique identifier is defined by the service
- // provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource).
- //
- // * Amazon Comprehend document classification endpoint - The resource type
- // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE.
- //
- // * Lambda provisioned concurrency - The resource type is function and the
- // unique identifier is the function name with a function version or alias
- // name suffix that is not $LATEST. Example: function:my-function:prod or
- // function:my-function:1.
- //
- // ResourceId is a required field
- ResourceId *string `min:"1" type:"string" required:"true"`
-
- // The scalable dimension. This string consists of the service namespace, resource
- // type, and scaling property.
- //
- // * ecs:service:DesiredCount - The desired task count of an ECS service.
- //
- // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot
- // Fleet request.
- //
- // * elasticmapreduce:instancegroup:InstanceCount - The instance count of
- // an EMR Instance Group.
- //
- // * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream
- // 2.0 fleet.
- //
- // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for
- // a DynamoDB table.
- //
- // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for
- // a DynamoDB table.
- //
- // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for
- // a DynamoDB global secondary index.
- //
- // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for
- // a DynamoDB global secondary index.
- //
- // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora
- // DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible
- // edition.
- //
- // * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances
- // for an Amazon SageMaker model endpoint variant.
- //
- // * custom-resource:ResourceType:Property - The scalable dimension for a
- // custom resource provided by your own application or service.
- //
- // * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The
- // number of inference units for an Amazon Comprehend document classification
- // endpoint.
- //
- // * lambda:function:ProvisionedConcurrency - The provisioned concurrency
- // for a Lambda function.
- // - // ScalableDimension is a required field - ScalableDimension *string `type:"string" required:"true" enum:"ScalableDimension"` - - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. - // - // ServiceNamespace is a required field - ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` - - // A step scaling policy. - StepScalingPolicyConfiguration *StepScalingPolicyConfiguration `type:"structure"` - - // A target tracking scaling policy. - TargetTrackingScalingPolicyConfiguration *TargetTrackingScalingPolicyConfiguration `type:"structure"` -} - -// String returns the string representation -func (s ScalingPolicy) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ScalingPolicy) GoString() string { - return s.String() -} - -// SetAlarms sets the Alarms field's value. -func (s *ScalingPolicy) SetAlarms(v []*Alarm) *ScalingPolicy { - s.Alarms = v - return s -} - -// SetCreationTime sets the CreationTime field's value. -func (s *ScalingPolicy) SetCreationTime(v time.Time) *ScalingPolicy { - s.CreationTime = &v - return s -} - -// SetPolicyARN sets the PolicyARN field's value. -func (s *ScalingPolicy) SetPolicyARN(v string) *ScalingPolicy { - s.PolicyARN = &v - return s -} - -// SetPolicyName sets the PolicyName field's value. -func (s *ScalingPolicy) SetPolicyName(v string) *ScalingPolicy { - s.PolicyName = &v - return s -} - -// SetPolicyType sets the PolicyType field's value. -func (s *ScalingPolicy) SetPolicyType(v string) *ScalingPolicy { - s.PolicyType = &v - return s -} - -// SetResourceId sets the ResourceId field's value. -func (s *ScalingPolicy) SetResourceId(v string) *ScalingPolicy { - s.ResourceId = &v - return s -} - -// SetScalableDimension sets the ScalableDimension field's value. -func (s *ScalingPolicy) SetScalableDimension(v string) *ScalingPolicy { - s.ScalableDimension = &v - return s -} - -// SetServiceNamespace sets the ServiceNamespace field's value. -func (s *ScalingPolicy) SetServiceNamespace(v string) *ScalingPolicy { - s.ServiceNamespace = &v - return s -} - -// SetStepScalingPolicyConfiguration sets the StepScalingPolicyConfiguration field's value. -func (s *ScalingPolicy) SetStepScalingPolicyConfiguration(v *StepScalingPolicyConfiguration) *ScalingPolicy { - s.StepScalingPolicyConfiguration = v - return s -} - -// SetTargetTrackingScalingPolicyConfiguration sets the TargetTrackingScalingPolicyConfiguration field's value. -func (s *ScalingPolicy) SetTargetTrackingScalingPolicyConfiguration(v *TargetTrackingScalingPolicyConfiguration) *ScalingPolicy { - s.TargetTrackingScalingPolicyConfiguration = v - return s -} - -// Represents a scheduled action. -type ScheduledAction struct { - _ struct{} `type:"structure"` - - // The date and time that the scheduled action was created. - // - // CreationTime is a required field - CreationTime *time.Time `type:"timestamp" required:"true"` - - // The date and time that the action is scheduled to end. - EndTime *time.Time `type:"timestamp"` - - // The identifier of the resource associated with the scaling policy. This string - // consists of the resource type and unique identifier. 
- // - // * ECS service - The resource type is service and the unique identifier - // is the cluster name and service name. Example: service/default/sample-webapp. - // - // * Spot Fleet request - The resource type is spot-fleet-request and the - // unique identifier is the Spot Fleet request ID. Example: spot-fleet-request/sfr-73fbd2ce-aa30-494c-8788-1cee4EXAMPLE. - // - // * EMR cluster - The resource type is instancegroup and the unique identifier - // is the cluster ID and instance group ID. Example: instancegroup/j-2EEZNYKUA1NTV/ig-1791Y4E1L8YI0. - // - // * AppStream 2.0 fleet - The resource type is fleet and the unique identifier - // is the fleet name. Example: fleet/sample-fleet. - // - // * DynamoDB table - The resource type is table and the unique identifier - // is the table name. Example: table/my-table. - // - // * DynamoDB global secondary index - The resource type is index and the - // unique identifier is the index name. Example: table/my-table/index/my-table-index. - // - // * Aurora DB cluster - The resource type is cluster and the unique identifier - // is the cluster name. Example: cluster:my-db-cluster. - // - // * Amazon SageMaker endpoint variant - The resource type is variant and - // the unique identifier is the resource ID. Example: endpoint/my-end-point/variant/KMeansClustering. - // - // * Custom resources are not supported with a resource type. This parameter - // must specify the OutputValue from the CloudFormation template stack used - // to access the resources. The unique identifier is defined by the service - // provider. More information is available in our GitHub repository (https://github.com/aws/aws-auto-scaling-custom-resource). - // - // * Amazon Comprehend document classification endpoint - The resource type - // and unique identifier are specified using the endpoint ARN. Example: arn:aws:comprehend:us-west-2:123456789012:document-classifier-endpoint/EXAMPLE. - // - // * Lambda provisioned concurrency - The resource type is function and the - // unique identifier is the function name with a function version or alias - // name suffix that is not $LATEST. Example: function:my-function:prod or - // function:my-function:1. - // - // ResourceId is a required field - ResourceId *string `min:"1" type:"string" required:"true"` - - // The scalable dimension. This string consists of the service namespace, resource - // type, and scaling property. - // - // * ecs:service:DesiredCount - The desired task count of an ECS service. - // - // * ec2:spot-fleet-request:TargetCapacity - The target capacity of a Spot - // Fleet request. - // - // * elasticmapreduce:instancegroup:InstanceCount - The instance count of - // an EMR Instance Group. - // - // * appstream:fleet:DesiredCapacity - The desired capacity of an AppStream - // 2.0 fleet. - // - // * dynamodb:table:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB table. - // - // * dynamodb:table:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB table. - // - // * dynamodb:index:ReadCapacityUnits - The provisioned read capacity for - // a DynamoDB global secondary index. - // - // * dynamodb:index:WriteCapacityUnits - The provisioned write capacity for - // a DynamoDB global secondary index. - // - // * rds:cluster:ReadReplicaCount - The count of Aurora Replicas in an Aurora - // DB cluster. Available for Aurora MySQL-compatible edition and Aurora PostgreSQL-compatible - // edition. 
- // - // * sagemaker:variant:DesiredInstanceCount - The number of EC2 instances - // for an Amazon SageMaker model endpoint variant. - // - // * custom-resource:ResourceType:Property - The scalable dimension for a - // custom resource provided by your own application or service. - // - // * comprehend:document-classifier-endpoint:DesiredInferenceUnits - The - // number of inference units for an Amazon Comprehend document classification - // endpoint. - // - // * lambda:function:ProvisionedConcurrency - The provisioned concurrency - // for a Lambda function. - ScalableDimension *string `type:"string" enum:"ScalableDimension"` - - // The new minimum and maximum capacity. You can set both values or just one. - // During the scheduled time, if the current capacity is below the minimum capacity, - // Application Auto Scaling scales out to the minimum capacity. If the current - // capacity is above the maximum capacity, Application Auto Scaling scales in - // to the maximum capacity. - ScalableTargetAction *ScalableTargetAction `type:"structure"` - - // The schedule for this action. The following formats are supported: - // - // * At expressions - "at(yyyy-mm-ddThh:mm:ss)" - // - // * Rate expressions - "rate(value unit)" - // - // * Cron expressions - "cron(fields)" - // - // At expressions are useful for one-time schedules. Specify the time, in UTC. - // - // For rate expressions, value is a positive integer and unit is minute | minutes - // | hour | hours | day | days. - // - // For more information about cron expressions, see Cron Expressions (https://docs.aws.amazon.com/AmazonCloudWatch/latest/events/ScheduledEvents.html#CronExpressions) - // in the Amazon CloudWatch Events User Guide. - // - // Schedule is a required field - Schedule *string `min:"1" type:"string" required:"true"` - - // The Amazon Resource Name (ARN) of the scheduled action. - // - // ScheduledActionARN is a required field - ScheduledActionARN *string `min:"1" type:"string" required:"true"` - - // The name of the scheduled action. - // - // ScheduledActionName is a required field - ScheduledActionName *string `min:"1" type:"string" required:"true"` - - // The namespace of the AWS service that provides the resource or custom-resource - // for a resource provided by your own application or service. For more information, - // see AWS Service Namespaces (http://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html#genref-aws-service-namespaces) - // in the Amazon Web Services General Reference. - // - // ServiceNamespace is a required field - ServiceNamespace *string `type:"string" required:"true" enum:"ServiceNamespace"` - - // The date and time that the action is scheduled to begin. - StartTime *time.Time `type:"timestamp"` -} - -// String returns the string representation -func (s ScheduledAction) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ScheduledAction) GoString() string { - return s.String() -} - -// SetCreationTime sets the CreationTime field's value. -func (s *ScheduledAction) SetCreationTime(v time.Time) *ScheduledAction { - s.CreationTime = &v - return s -} - -// SetEndTime sets the EndTime field's value. -func (s *ScheduledAction) SetEndTime(v time.Time) *ScheduledAction { - s.EndTime = &v - return s -} - -// SetResourceId sets the ResourceId field's value. -func (s *ScheduledAction) SetResourceId(v string) *ScheduledAction { - s.ResourceId = &v - return s -} - -// SetScalableDimension sets the ScalableDimension field's value. 
-func (s *ScheduledAction) SetScalableDimension(v string) *ScheduledAction { - s.ScalableDimension = &v - return s -} - -// SetScalableTargetAction sets the ScalableTargetAction field's value. -func (s *ScheduledAction) SetScalableTargetAction(v *ScalableTargetAction) *ScheduledAction { - s.ScalableTargetAction = v - return s -} - -// SetSchedule sets the Schedule field's value. -func (s *ScheduledAction) SetSchedule(v string) *ScheduledAction { - s.Schedule = &v - return s -} - -// SetScheduledActionARN sets the ScheduledActionARN field's value. -func (s *ScheduledAction) SetScheduledActionARN(v string) *ScheduledAction { - s.ScheduledActionARN = &v - return s -} - -// SetScheduledActionName sets the ScheduledActionName field's value. -func (s *ScheduledAction) SetScheduledActionName(v string) *ScheduledAction { - s.ScheduledActionName = &v - return s -} - -// SetServiceNamespace sets the ServiceNamespace field's value. -func (s *ScheduledAction) SetServiceNamespace(v string) *ScheduledAction { - s.ServiceNamespace = &v - return s -} - -// SetStartTime sets the StartTime field's value. -func (s *ScheduledAction) SetStartTime(v time.Time) *ScheduledAction { - s.StartTime = &v - return s -} - -// Represents a step adjustment for a StepScalingPolicyConfiguration. Describes -// an adjustment based on the difference between the value of the aggregated -// CloudWatch metric and the breach threshold that you've defined for the alarm. -// -// For the following examples, suppose that you have an alarm with a breach -// threshold of 50: -// -// * To trigger the adjustment when the metric is greater than or equal to -// 50 and less than 60, specify a lower bound of 0 and an upper bound of -// 10. -// -// * To trigger the adjustment when the metric is greater than 40 and less -// than or equal to 50, specify a lower bound of -10 and an upper bound of -// 0. -// -// There are a few rules for the step adjustments for your step policy: -// -// * The ranges of your step adjustments can't overlap or have a gap. -// -// * At most one step adjustment can have a null lower bound. If one step -// adjustment has a negative lower bound, then there must be a step adjustment -// with a null lower bound. -// -// * At most one step adjustment can have a null upper bound. If one step -// adjustment has a positive upper bound, then there must be a step adjustment -// with a null upper bound. -// -// * The upper and lower bound can't be null in the same step adjustment. -type StepAdjustment struct { - _ struct{} `type:"structure"` - - // The lower bound for the difference between the alarm threshold and the CloudWatch - // metric. If the metric value is above the breach threshold, the lower bound - // is inclusive (the metric must be greater than or equal to the threshold plus - // the lower bound). Otherwise, it is exclusive (the metric must be greater - // than the threshold plus the lower bound). A null value indicates negative - // infinity. - MetricIntervalLowerBound *float64 `type:"double"` - - // The upper bound for the difference between the alarm threshold and the CloudWatch - // metric. If the metric value is above the breach threshold, the upper bound - // is exclusive (the metric must be less than the threshold plus the upper bound). - // Otherwise, it is inclusive (the metric must be less than or equal to the - // threshold plus the upper bound). A null value indicates positive infinity. - // - // The upper bound must be greater than the lower bound. 
- MetricIntervalUpperBound *float64 `type:"double"` - - // The amount by which to scale, based on the specified adjustment type. A positive - // value adds to the current scalable dimension while a negative number removes - // from the current scalable dimension. - // - // ScalingAdjustment is a required field - ScalingAdjustment *int64 `type:"integer" required:"true"` -} - -// String returns the string representation -func (s StepAdjustment) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StepAdjustment) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StepAdjustment) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StepAdjustment"} - if s.ScalingAdjustment == nil { - invalidParams.Add(request.NewErrParamRequired("ScalingAdjustment")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetMetricIntervalLowerBound sets the MetricIntervalLowerBound field's value. -func (s *StepAdjustment) SetMetricIntervalLowerBound(v float64) *StepAdjustment { - s.MetricIntervalLowerBound = &v - return s -} - -// SetMetricIntervalUpperBound sets the MetricIntervalUpperBound field's value. -func (s *StepAdjustment) SetMetricIntervalUpperBound(v float64) *StepAdjustment { - s.MetricIntervalUpperBound = &v - return s -} - -// SetScalingAdjustment sets the ScalingAdjustment field's value. -func (s *StepAdjustment) SetScalingAdjustment(v int64) *StepAdjustment { - s.ScalingAdjustment = &v - return s -} - -// Represents a step scaling policy configuration to use with Application Auto -// Scaling. -type StepScalingPolicyConfiguration struct { - _ struct{} `type:"structure"` - - // Specifies whether the ScalingAdjustment value in a StepAdjustment is an absolute - // number or a percentage of the current capacity. - AdjustmentType *string `type:"string" enum:"AdjustmentType"` - - // The amount of time, in seconds, after a scaling activity completes where - // previous trigger-related scaling activities can influence future scaling - // events. - // - // For scale-out policies, while the cooldown period is in effect, the capacity - // that has been added by the previous scale-out event that initiated the cooldown - // is calculated as part of the desired capacity for the next scale out. The - // intention is to continuously (but not excessively) scale out. For example, - // an alarm triggers a step scaling policy to scale out an Amazon ECS service - // by 2 tasks, the scaling activity completes successfully, and a cooldown period - // of 5 minutes starts. During the cooldown period, if the alarm triggers the - // same policy again but at a more aggressive step adjustment to scale out the - // service by 3 tasks, the 2 tasks that were added in the previous scale-out - // event are considered part of that capacity and only 1 additional task is - // added to the desired count. - // - // For scale-in policies, the cooldown period is used to block subsequent scale-in - // requests until it has expired. The intention is to scale in conservatively - // to protect your application's availability. However, if another alarm triggers - // a scale-out policy during the cooldown period after a scale-in, Application - // Auto Scaling scales out your scalable target immediately. - Cooldown *int64 `type:"integer"` - - // The aggregation type for the CloudWatch metrics. Valid values are Minimum, - // Maximum, and Average. 
If the aggregation type is null, the value is treated - // as Average. - MetricAggregationType *string `type:"string" enum:"MetricAggregationType"` - - // The minimum number to adjust your scalable dimension as a result of a scaling - // activity. If the adjustment type is PercentChangeInCapacity, the scaling - // policy changes the scalable dimension of the scalable target by this amount. - // - // For example, suppose that you create a step scaling policy to scale out an - // Amazon ECS service by 25 percent and you specify a MinAdjustmentMagnitude - // of 2. If the service has 4 tasks and the scaling policy is performed, 25 - // percent of 4 is 1. However, because you specified a MinAdjustmentMagnitude - // of 2, Application Auto Scaling scales out the service by 2 tasks. - MinAdjustmentMagnitude *int64 `type:"integer"` - - // A set of adjustments that enable you to scale based on the size of the alarm - // breach. - StepAdjustments []*StepAdjustment `type:"list"` -} - -// String returns the string representation -func (s StepScalingPolicyConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s StepScalingPolicyConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *StepScalingPolicyConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "StepScalingPolicyConfiguration"} - if s.StepAdjustments != nil { - for i, v := range s.StepAdjustments { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "StepAdjustments", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetAdjustmentType sets the AdjustmentType field's value. -func (s *StepScalingPolicyConfiguration) SetAdjustmentType(v string) *StepScalingPolicyConfiguration { - s.AdjustmentType = &v - return s -} - -// SetCooldown sets the Cooldown field's value. -func (s *StepScalingPolicyConfiguration) SetCooldown(v int64) *StepScalingPolicyConfiguration { - s.Cooldown = &v - return s -} - -// SetMetricAggregationType sets the MetricAggregationType field's value. -func (s *StepScalingPolicyConfiguration) SetMetricAggregationType(v string) *StepScalingPolicyConfiguration { - s.MetricAggregationType = &v - return s -} - -// SetMinAdjustmentMagnitude sets the MinAdjustmentMagnitude field's value. -func (s *StepScalingPolicyConfiguration) SetMinAdjustmentMagnitude(v int64) *StepScalingPolicyConfiguration { - s.MinAdjustmentMagnitude = &v - return s -} - -// SetStepAdjustments sets the StepAdjustments field's value. -func (s *StepScalingPolicyConfiguration) SetStepAdjustments(v []*StepAdjustment) *StepScalingPolicyConfiguration { - s.StepAdjustments = v - return s -} - -// Specifies whether the scaling activities for a scalable target are in a suspended -// state. -type SuspendedState struct { - _ struct{} `type:"structure"` - - // Whether scale in by a target tracking scaling policy or a step scaling policy - // is suspended. Set the value to true if you don't want Application Auto Scaling - // to remove capacity when a scaling policy is triggered. The default is false. - DynamicScalingInSuspended *bool `type:"boolean"` - - // Whether scale out by a target tracking scaling policy or a step scaling policy - // is suspended. 
Set the value to true if you don't want Application Auto Scaling - // to add capacity when a scaling policy is triggered. The default is false. - DynamicScalingOutSuspended *bool `type:"boolean"` - - // Whether scheduled scaling is suspended. Set the value to true if you don't - // want Application Auto Scaling to add or remove capacity by initiating scheduled - // actions. The default is false. - ScheduledScalingSuspended *bool `type:"boolean"` -} - -// String returns the string representation -func (s SuspendedState) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SuspendedState) GoString() string { - return s.String() -} - -// SetDynamicScalingInSuspended sets the DynamicScalingInSuspended field's value. -func (s *SuspendedState) SetDynamicScalingInSuspended(v bool) *SuspendedState { - s.DynamicScalingInSuspended = &v - return s -} - -// SetDynamicScalingOutSuspended sets the DynamicScalingOutSuspended field's value. -func (s *SuspendedState) SetDynamicScalingOutSuspended(v bool) *SuspendedState { - s.DynamicScalingOutSuspended = &v - return s -} - -// SetScheduledScalingSuspended sets the ScheduledScalingSuspended field's value. -func (s *SuspendedState) SetScheduledScalingSuspended(v bool) *SuspendedState { - s.ScheduledScalingSuspended = &v - return s -} - -// Represents a target tracking scaling policy configuration to use with Application -// Auto Scaling. -type TargetTrackingScalingPolicyConfiguration struct { - _ struct{} `type:"structure"` - - // A customized metric. You can specify either a predefined metric or a customized - // metric. - CustomizedMetricSpecification *CustomizedMetricSpecification `type:"structure"` - - // Indicates whether scale in by the target tracking scaling policy is disabled. - // If the value is true, scale in is disabled and the target tracking scaling - // policy won't remove capacity from the scalable resource. Otherwise, scale - // in is enabled and the target tracking scaling policy can remove capacity - // from the scalable resource. The default value is false. - DisableScaleIn *bool `type:"boolean"` - - // A predefined metric. You can specify either a predefined metric or a customized - // metric. - PredefinedMetricSpecification *PredefinedMetricSpecification `type:"structure"` - - // The amount of time, in seconds, after a scale-in activity completes before - // another scale in activity can start. - // - // The cooldown period is used to block subsequent scale-in requests until it - // has expired. The intention is to scale in conservatively to protect your - // application's availability. However, if another alarm triggers a scale-out - // policy during the cooldown period after a scale-in, Application Auto Scaling - // scales out your scalable target immediately. - ScaleInCooldown *int64 `type:"integer"` - - // The amount of time, in seconds, after a scale-out activity completes before - // another scale-out activity can start. - // - // While the cooldown period is in effect, the capacity that has been added - // by the previous scale-out event that initiated the cooldown is calculated - // as part of the desired capacity for the next scale out. The intention is - // to continuously (but not excessively) scale out. - ScaleOutCooldown *int64 `type:"integer"` - - // The target value for the metric. The range is 8.515920e-109 to 1.174271e+108 - // (Base 10) or 2e-360 to 2e360 (Base 2). 
- // - // TargetValue is a required field - TargetValue *float64 `type:"double" required:"true"` -} - -// String returns the string representation -func (s TargetTrackingScalingPolicyConfiguration) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TargetTrackingScalingPolicyConfiguration) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TargetTrackingScalingPolicyConfiguration) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TargetTrackingScalingPolicyConfiguration"} - if s.TargetValue == nil { - invalidParams.Add(request.NewErrParamRequired("TargetValue")) - } - if s.CustomizedMetricSpecification != nil { - if err := s.CustomizedMetricSpecification.Validate(); err != nil { - invalidParams.AddNested("CustomizedMetricSpecification", err.(request.ErrInvalidParams)) - } - } - if s.PredefinedMetricSpecification != nil { - if err := s.PredefinedMetricSpecification.Validate(); err != nil { - invalidParams.AddNested("PredefinedMetricSpecification", err.(request.ErrInvalidParams)) - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -// SetCustomizedMetricSpecification sets the CustomizedMetricSpecification field's value. -func (s *TargetTrackingScalingPolicyConfiguration) SetCustomizedMetricSpecification(v *CustomizedMetricSpecification) *TargetTrackingScalingPolicyConfiguration { - s.CustomizedMetricSpecification = v - return s -} - -// SetDisableScaleIn sets the DisableScaleIn field's value. -func (s *TargetTrackingScalingPolicyConfiguration) SetDisableScaleIn(v bool) *TargetTrackingScalingPolicyConfiguration { - s.DisableScaleIn = &v - return s -} - -// SetPredefinedMetricSpecification sets the PredefinedMetricSpecification field's value. -func (s *TargetTrackingScalingPolicyConfiguration) SetPredefinedMetricSpecification(v *PredefinedMetricSpecification) *TargetTrackingScalingPolicyConfiguration { - s.PredefinedMetricSpecification = v - return s -} - -// SetScaleInCooldown sets the ScaleInCooldown field's value. -func (s *TargetTrackingScalingPolicyConfiguration) SetScaleInCooldown(v int64) *TargetTrackingScalingPolicyConfiguration { - s.ScaleInCooldown = &v - return s -} - -// SetScaleOutCooldown sets the ScaleOutCooldown field's value. -func (s *TargetTrackingScalingPolicyConfiguration) SetScaleOutCooldown(v int64) *TargetTrackingScalingPolicyConfiguration { - s.ScaleOutCooldown = &v - return s -} - -// SetTargetValue sets the TargetValue field's value. 
-func (s *TargetTrackingScalingPolicyConfiguration) SetTargetValue(v float64) *TargetTrackingScalingPolicyConfiguration { - s.TargetValue = &v - return s -} - -const ( - // AdjustmentTypeChangeInCapacity is a AdjustmentType enum value - AdjustmentTypeChangeInCapacity = "ChangeInCapacity" - - // AdjustmentTypePercentChangeInCapacity is a AdjustmentType enum value - AdjustmentTypePercentChangeInCapacity = "PercentChangeInCapacity" - - // AdjustmentTypeExactCapacity is a AdjustmentType enum value - AdjustmentTypeExactCapacity = "ExactCapacity" -) - -const ( - // MetricAggregationTypeAverage is a MetricAggregationType enum value - MetricAggregationTypeAverage = "Average" - - // MetricAggregationTypeMinimum is a MetricAggregationType enum value - MetricAggregationTypeMinimum = "Minimum" - - // MetricAggregationTypeMaximum is a MetricAggregationType enum value - MetricAggregationTypeMaximum = "Maximum" -) - -const ( - // MetricStatisticAverage is a MetricStatistic enum value - MetricStatisticAverage = "Average" - - // MetricStatisticMinimum is a MetricStatistic enum value - MetricStatisticMinimum = "Minimum" - - // MetricStatisticMaximum is a MetricStatistic enum value - MetricStatisticMaximum = "Maximum" - - // MetricStatisticSampleCount is a MetricStatistic enum value - MetricStatisticSampleCount = "SampleCount" - - // MetricStatisticSum is a MetricStatistic enum value - MetricStatisticSum = "Sum" -) - -const ( - // MetricTypeDynamoDbreadCapacityUtilization is a MetricType enum value - MetricTypeDynamoDbreadCapacityUtilization = "DynamoDBReadCapacityUtilization" - - // MetricTypeDynamoDbwriteCapacityUtilization is a MetricType enum value - MetricTypeDynamoDbwriteCapacityUtilization = "DynamoDBWriteCapacityUtilization" - - // MetricTypeAlbrequestCountPerTarget is a MetricType enum value - MetricTypeAlbrequestCountPerTarget = "ALBRequestCountPerTarget" - - // MetricTypeRdsreaderAverageCpuutilization is a MetricType enum value - MetricTypeRdsreaderAverageCpuutilization = "RDSReaderAverageCPUUtilization" - - // MetricTypeRdsreaderAverageDatabaseConnections is a MetricType enum value - MetricTypeRdsreaderAverageDatabaseConnections = "RDSReaderAverageDatabaseConnections" - - // MetricTypeEc2spotFleetRequestAverageCpuutilization is a MetricType enum value - MetricTypeEc2spotFleetRequestAverageCpuutilization = "EC2SpotFleetRequestAverageCPUUtilization" - - // MetricTypeEc2spotFleetRequestAverageNetworkIn is a MetricType enum value - MetricTypeEc2spotFleetRequestAverageNetworkIn = "EC2SpotFleetRequestAverageNetworkIn" - - // MetricTypeEc2spotFleetRequestAverageNetworkOut is a MetricType enum value - MetricTypeEc2spotFleetRequestAverageNetworkOut = "EC2SpotFleetRequestAverageNetworkOut" - - // MetricTypeSageMakerVariantInvocationsPerInstance is a MetricType enum value - MetricTypeSageMakerVariantInvocationsPerInstance = "SageMakerVariantInvocationsPerInstance" - - // MetricTypeEcsserviceAverageCpuutilization is a MetricType enum value - MetricTypeEcsserviceAverageCpuutilization = "ECSServiceAverageCPUUtilization" - - // MetricTypeEcsserviceAverageMemoryUtilization is a MetricType enum value - MetricTypeEcsserviceAverageMemoryUtilization = "ECSServiceAverageMemoryUtilization" - - // MetricTypeAppStreamAverageCapacityUtilization is a MetricType enum value - MetricTypeAppStreamAverageCapacityUtilization = "AppStreamAverageCapacityUtilization" - - // MetricTypeComprehendInferenceUtilization is a MetricType enum value - MetricTypeComprehendInferenceUtilization = "ComprehendInferenceUtilization" - - // 
MetricTypeLambdaProvisionedConcurrencyUtilization is a MetricType enum value - MetricTypeLambdaProvisionedConcurrencyUtilization = "LambdaProvisionedConcurrencyUtilization" -) - -const ( - // PolicyTypeStepScaling is a PolicyType enum value - PolicyTypeStepScaling = "StepScaling" - - // PolicyTypeTargetTrackingScaling is a PolicyType enum value - PolicyTypeTargetTrackingScaling = "TargetTrackingScaling" -) - -const ( - // ScalableDimensionEcsServiceDesiredCount is a ScalableDimension enum value - ScalableDimensionEcsServiceDesiredCount = "ecs:service:DesiredCount" - - // ScalableDimensionEc2SpotFleetRequestTargetCapacity is a ScalableDimension enum value - ScalableDimensionEc2SpotFleetRequestTargetCapacity = "ec2:spot-fleet-request:TargetCapacity" - - // ScalableDimensionElasticmapreduceInstancegroupInstanceCount is a ScalableDimension enum value - ScalableDimensionElasticmapreduceInstancegroupInstanceCount = "elasticmapreduce:instancegroup:InstanceCount" - - // ScalableDimensionAppstreamFleetDesiredCapacity is a ScalableDimension enum value - ScalableDimensionAppstreamFleetDesiredCapacity = "appstream:fleet:DesiredCapacity" - - // ScalableDimensionDynamodbTableReadCapacityUnits is a ScalableDimension enum value - ScalableDimensionDynamodbTableReadCapacityUnits = "dynamodb:table:ReadCapacityUnits" - - // ScalableDimensionDynamodbTableWriteCapacityUnits is a ScalableDimension enum value - ScalableDimensionDynamodbTableWriteCapacityUnits = "dynamodb:table:WriteCapacityUnits" - - // ScalableDimensionDynamodbIndexReadCapacityUnits is a ScalableDimension enum value - ScalableDimensionDynamodbIndexReadCapacityUnits = "dynamodb:index:ReadCapacityUnits" - - // ScalableDimensionDynamodbIndexWriteCapacityUnits is a ScalableDimension enum value - ScalableDimensionDynamodbIndexWriteCapacityUnits = "dynamodb:index:WriteCapacityUnits" - - // ScalableDimensionRdsClusterReadReplicaCount is a ScalableDimension enum value - ScalableDimensionRdsClusterReadReplicaCount = "rds:cluster:ReadReplicaCount" - - // ScalableDimensionSagemakerVariantDesiredInstanceCount is a ScalableDimension enum value - ScalableDimensionSagemakerVariantDesiredInstanceCount = "sagemaker:variant:DesiredInstanceCount" - - // ScalableDimensionCustomResourceResourceTypeProperty is a ScalableDimension enum value - ScalableDimensionCustomResourceResourceTypeProperty = "custom-resource:ResourceType:Property" - - // ScalableDimensionComprehendDocumentClassifierEndpointDesiredInferenceUnits is a ScalableDimension enum value - ScalableDimensionComprehendDocumentClassifierEndpointDesiredInferenceUnits = "comprehend:document-classifier-endpoint:DesiredInferenceUnits" - - // ScalableDimensionLambdaFunctionProvisionedConcurrency is a ScalableDimension enum value - ScalableDimensionLambdaFunctionProvisionedConcurrency = "lambda:function:ProvisionedConcurrency" -) - -const ( - // ScalingActivityStatusCodePending is a ScalingActivityStatusCode enum value - ScalingActivityStatusCodePending = "Pending" - - // ScalingActivityStatusCodeInProgress is a ScalingActivityStatusCode enum value - ScalingActivityStatusCodeInProgress = "InProgress" - - // ScalingActivityStatusCodeSuccessful is a ScalingActivityStatusCode enum value - ScalingActivityStatusCodeSuccessful = "Successful" - - // ScalingActivityStatusCodeOverridden is a ScalingActivityStatusCode enum value - ScalingActivityStatusCodeOverridden = "Overridden" - - // ScalingActivityStatusCodeUnfulfilled is a ScalingActivityStatusCode enum value - ScalingActivityStatusCodeUnfulfilled = "Unfulfilled" - - 
// ScalingActivityStatusCodeFailed is a ScalingActivityStatusCode enum value - ScalingActivityStatusCodeFailed = "Failed" -) - -const ( - // ServiceNamespaceEcs is a ServiceNamespace enum value - ServiceNamespaceEcs = "ecs" - - // ServiceNamespaceElasticmapreduce is a ServiceNamespace enum value - ServiceNamespaceElasticmapreduce = "elasticmapreduce" - - // ServiceNamespaceEc2 is a ServiceNamespace enum value - ServiceNamespaceEc2 = "ec2" - - // ServiceNamespaceAppstream is a ServiceNamespace enum value - ServiceNamespaceAppstream = "appstream" - - // ServiceNamespaceDynamodb is a ServiceNamespace enum value - ServiceNamespaceDynamodb = "dynamodb" - - // ServiceNamespaceRds is a ServiceNamespace enum value - ServiceNamespaceRds = "rds" - - // ServiceNamespaceSagemaker is a ServiceNamespace enum value - ServiceNamespaceSagemaker = "sagemaker" - - // ServiceNamespaceCustomResource is a ServiceNamespace enum value - ServiceNamespaceCustomResource = "custom-resource" - - // ServiceNamespaceComprehend is a ServiceNamespace enum value - ServiceNamespaceComprehend = "comprehend" - - // ServiceNamespaceLambda is a ServiceNamespace enum value - ServiceNamespaceLambda = "lambda" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go deleted file mode 100644 index a5165752810e1..0000000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface/interface.go +++ /dev/null @@ -1,116 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package applicationautoscalingiface provides an interface to enable mocking the Application Auto Scaling service client -// for testing your code. -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. -package applicationautoscalingiface - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/applicationautoscaling" -) - -// ApplicationAutoScalingAPI provides an interface to enable mocking the -// applicationautoscaling.ApplicationAutoScaling service client's API operations, -// paginators, and waiters. This makes unit testing your code that calls out -// to the SDK's service client's calls easier. -// -// The best way to use this interface is so the SDK's service client's calls -// can be stubbed out for unit testing your code with the SDK without needing -// to inject custom request handlers into the SDK's request pipeline. -// -// // myFunc uses an SDK service client to make a request to -// // Application Auto Scaling. -// func myFunc(svc applicationautoscalingiface.ApplicationAutoScalingAPI) bool { -// // Make svc.DeleteScalingPolicy request -// } -// -// func main() { -// sess := session.New() -// svc := applicationautoscaling.New(sess) -// -// myFunc(svc) -// } -// -// In your _test.go file: -// -// // Define a mock struct to be used in your unit tests of myFunc. 
-// type mockApplicationAutoScalingClient struct { -// applicationautoscalingiface.ApplicationAutoScalingAPI -// } -// func (m *mockApplicationAutoScalingClient) DeleteScalingPolicy(input *applicationautoscaling.DeleteScalingPolicyInput) (*applicationautoscaling.DeleteScalingPolicyOutput, error) { -// // mock response/functionality -// } -// -// func TestMyFunc(t *testing.T) { -// // Setup Test -// mockSvc := &mockApplicationAutoScalingClient{} -// -// myfunc(mockSvc) -// -// // Verify myFunc's functionality -// } -// -// It is important to note that this interface will have breaking changes -// when the service model is updated and adds new API operations, paginators, -// and waiters. It's suggested to use the pattern above for testing, or to use -// tooling to generate mocks to satisfy the interfaces. -type ApplicationAutoScalingAPI interface { - DeleteScalingPolicy(*applicationautoscaling.DeleteScalingPolicyInput) (*applicationautoscaling.DeleteScalingPolicyOutput, error) - DeleteScalingPolicyWithContext(aws.Context, *applicationautoscaling.DeleteScalingPolicyInput, ...request.Option) (*applicationautoscaling.DeleteScalingPolicyOutput, error) - DeleteScalingPolicyRequest(*applicationautoscaling.DeleteScalingPolicyInput) (*request.Request, *applicationautoscaling.DeleteScalingPolicyOutput) - - DeleteScheduledAction(*applicationautoscaling.DeleteScheduledActionInput) (*applicationautoscaling.DeleteScheduledActionOutput, error) - DeleteScheduledActionWithContext(aws.Context, *applicationautoscaling.DeleteScheduledActionInput, ...request.Option) (*applicationautoscaling.DeleteScheduledActionOutput, error) - DeleteScheduledActionRequest(*applicationautoscaling.DeleteScheduledActionInput) (*request.Request, *applicationautoscaling.DeleteScheduledActionOutput) - - DeregisterScalableTarget(*applicationautoscaling.DeregisterScalableTargetInput) (*applicationautoscaling.DeregisterScalableTargetOutput, error) - DeregisterScalableTargetWithContext(aws.Context, *applicationautoscaling.DeregisterScalableTargetInput, ...request.Option) (*applicationautoscaling.DeregisterScalableTargetOutput, error) - DeregisterScalableTargetRequest(*applicationautoscaling.DeregisterScalableTargetInput) (*request.Request, *applicationautoscaling.DeregisterScalableTargetOutput) - - DescribeScalableTargets(*applicationautoscaling.DescribeScalableTargetsInput) (*applicationautoscaling.DescribeScalableTargetsOutput, error) - DescribeScalableTargetsWithContext(aws.Context, *applicationautoscaling.DescribeScalableTargetsInput, ...request.Option) (*applicationautoscaling.DescribeScalableTargetsOutput, error) - DescribeScalableTargetsRequest(*applicationautoscaling.DescribeScalableTargetsInput) (*request.Request, *applicationautoscaling.DescribeScalableTargetsOutput) - - DescribeScalableTargetsPages(*applicationautoscaling.DescribeScalableTargetsInput, func(*applicationautoscaling.DescribeScalableTargetsOutput, bool) bool) error - DescribeScalableTargetsPagesWithContext(aws.Context, *applicationautoscaling.DescribeScalableTargetsInput, func(*applicationautoscaling.DescribeScalableTargetsOutput, bool) bool, ...request.Option) error - - DescribeScalingActivities(*applicationautoscaling.DescribeScalingActivitiesInput) (*applicationautoscaling.DescribeScalingActivitiesOutput, error) - DescribeScalingActivitiesWithContext(aws.Context, *applicationautoscaling.DescribeScalingActivitiesInput, ...request.Option) (*applicationautoscaling.DescribeScalingActivitiesOutput, error) - 
DescribeScalingActivitiesRequest(*applicationautoscaling.DescribeScalingActivitiesInput) (*request.Request, *applicationautoscaling.DescribeScalingActivitiesOutput) - - DescribeScalingActivitiesPages(*applicationautoscaling.DescribeScalingActivitiesInput, func(*applicationautoscaling.DescribeScalingActivitiesOutput, bool) bool) error - DescribeScalingActivitiesPagesWithContext(aws.Context, *applicationautoscaling.DescribeScalingActivitiesInput, func(*applicationautoscaling.DescribeScalingActivitiesOutput, bool) bool, ...request.Option) error - - DescribeScalingPolicies(*applicationautoscaling.DescribeScalingPoliciesInput) (*applicationautoscaling.DescribeScalingPoliciesOutput, error) - DescribeScalingPoliciesWithContext(aws.Context, *applicationautoscaling.DescribeScalingPoliciesInput, ...request.Option) (*applicationautoscaling.DescribeScalingPoliciesOutput, error) - DescribeScalingPoliciesRequest(*applicationautoscaling.DescribeScalingPoliciesInput) (*request.Request, *applicationautoscaling.DescribeScalingPoliciesOutput) - - DescribeScalingPoliciesPages(*applicationautoscaling.DescribeScalingPoliciesInput, func(*applicationautoscaling.DescribeScalingPoliciesOutput, bool) bool) error - DescribeScalingPoliciesPagesWithContext(aws.Context, *applicationautoscaling.DescribeScalingPoliciesInput, func(*applicationautoscaling.DescribeScalingPoliciesOutput, bool) bool, ...request.Option) error - - DescribeScheduledActions(*applicationautoscaling.DescribeScheduledActionsInput) (*applicationautoscaling.DescribeScheduledActionsOutput, error) - DescribeScheduledActionsWithContext(aws.Context, *applicationautoscaling.DescribeScheduledActionsInput, ...request.Option) (*applicationautoscaling.DescribeScheduledActionsOutput, error) - DescribeScheduledActionsRequest(*applicationautoscaling.DescribeScheduledActionsInput) (*request.Request, *applicationautoscaling.DescribeScheduledActionsOutput) - - DescribeScheduledActionsPages(*applicationautoscaling.DescribeScheduledActionsInput, func(*applicationautoscaling.DescribeScheduledActionsOutput, bool) bool) error - DescribeScheduledActionsPagesWithContext(aws.Context, *applicationautoscaling.DescribeScheduledActionsInput, func(*applicationautoscaling.DescribeScheduledActionsOutput, bool) bool, ...request.Option) error - - PutScalingPolicy(*applicationautoscaling.PutScalingPolicyInput) (*applicationautoscaling.PutScalingPolicyOutput, error) - PutScalingPolicyWithContext(aws.Context, *applicationautoscaling.PutScalingPolicyInput, ...request.Option) (*applicationautoscaling.PutScalingPolicyOutput, error) - PutScalingPolicyRequest(*applicationautoscaling.PutScalingPolicyInput) (*request.Request, *applicationautoscaling.PutScalingPolicyOutput) - - PutScheduledAction(*applicationautoscaling.PutScheduledActionInput) (*applicationautoscaling.PutScheduledActionOutput, error) - PutScheduledActionWithContext(aws.Context, *applicationautoscaling.PutScheduledActionInput, ...request.Option) (*applicationautoscaling.PutScheduledActionOutput, error) - PutScheduledActionRequest(*applicationautoscaling.PutScheduledActionInput) (*request.Request, *applicationautoscaling.PutScheduledActionOutput) - - RegisterScalableTarget(*applicationautoscaling.RegisterScalableTargetInput) (*applicationautoscaling.RegisterScalableTargetOutput, error) - RegisterScalableTargetWithContext(aws.Context, *applicationautoscaling.RegisterScalableTargetInput, ...request.Option) (*applicationautoscaling.RegisterScalableTargetOutput, error) - 
RegisterScalableTargetRequest(*applicationautoscaling.RegisterScalableTargetInput) (*request.Request, *applicationautoscaling.RegisterScalableTargetOutput) -} - -var _ ApplicationAutoScalingAPI = (*applicationautoscaling.ApplicationAutoScaling)(nil) diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go deleted file mode 100644 index 4a1a89ee70a88..0000000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/doc.go +++ /dev/null @@ -1,73 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -// Package applicationautoscaling provides the client and types for making API -// requests to Application Auto Scaling. -// -// With Application Auto Scaling, you can configure automatic scaling for the -// following resources: -// -// * Amazon ECS services -// -// * Amazon EC2 Spot Fleet requests -// -// * Amazon EMR clusters -// -// * Amazon AppStream 2.0 fleets -// -// * Amazon DynamoDB tables and global secondary indexes throughput capacity -// -// * Amazon Aurora Replicas -// -// * Amazon SageMaker endpoint variants -// -// * Custom resources provided by your own applications or services -// -// * Amazon Comprehend document classification endpoints -// -// * AWS Lambda function provisioned concurrency -// -// API Summary -// -// The Application Auto Scaling service API includes three key sets of actions: -// -// * Register and manage scalable targets - Register AWS or custom resources -// as scalable targets (a resource that Application Auto Scaling can scale), -// set minimum and maximum capacity limits, and retrieve information on existing -// scalable targets. -// -// * Configure and manage automatic scaling - Define scaling policies to -// dynamically scale your resources in response to CloudWatch alarms, schedule -// one-time or recurring scaling actions, and retrieve your recent scaling -// activity history. -// -// * Suspend and resume scaling - Temporarily suspend and later resume automatic -// scaling by calling the RegisterScalableTarget action for any Application -// Auto Scaling scalable target. You can suspend and resume, individually -// or in combination, scale-out activities triggered by a scaling policy, -// scale-in activities triggered by a scaling policy, and scheduled scaling. -// -// To learn more about Application Auto Scaling, including information about -// granting IAM users required permissions for Application Auto Scaling actions, -// see the Application Auto Scaling User Guide (https://docs.aws.amazon.com/autoscaling/application/userguide/what-is-application-auto-scaling.html). -// -// See https://docs.aws.amazon.com/goto/WebAPI/application-autoscaling-2016-02-06 for more information on this service. -// -// See applicationautoscaling package documentation for more information. -// https://docs.aws.amazon.com/sdk-for-go/api/service/applicationautoscaling/ -// -// Using the Client -// -// To contact Application Auto Scaling with the SDK use the New function to create -// a new service client. With that client you can make API requests to the service. -// These clients are safe to use concurrently. -// -// See the SDK's documentation for more information on how to use the SDK. -// https://docs.aws.amazon.com/sdk-for-go/api/ -// -// See aws.Config documentation for more information on configuring SDK clients. 
-// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config -// -// See the Application Auto Scaling client ApplicationAutoScaling for more -// information on creating client for this service. -// https://docs.aws.amazon.com/sdk-for-go/api/service/applicationautoscaling/#New -package applicationautoscaling diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/errors.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/errors.go deleted file mode 100644 index 7d4101013826f..0000000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/errors.go +++ /dev/null @@ -1,60 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package applicationautoscaling - -const ( - - // ErrCodeConcurrentUpdateException for service response error code - // "ConcurrentUpdateException". - // - // Concurrent updates caused an exception, for example, if you request an update - // to an Application Auto Scaling resource that already has a pending update. - ErrCodeConcurrentUpdateException = "ConcurrentUpdateException" - - // ErrCodeFailedResourceAccessException for service response error code - // "FailedResourceAccessException". - // - // Failed access to resources caused an exception. This exception is thrown - // when Application Auto Scaling is unable to retrieve the alarms associated - // with a scaling policy due to a client error, for example, if the role ARN - // specified for a scalable target does not have permission to call the CloudWatch - // DescribeAlarms (https://docs.aws.amazon.com/AmazonCloudWatch/latest/APIReference/API_DescribeAlarms.html) - // on your behalf. - ErrCodeFailedResourceAccessException = "FailedResourceAccessException" - - // ErrCodeInternalServiceException for service response error code - // "InternalServiceException". - // - // The service encountered an internal error. - ErrCodeInternalServiceException = "InternalServiceException" - - // ErrCodeInvalidNextTokenException for service response error code - // "InvalidNextTokenException". - // - // The next token supplied was invalid. - ErrCodeInvalidNextTokenException = "InvalidNextTokenException" - - // ErrCodeLimitExceededException for service response error code - // "LimitExceededException". - // - // A per-account resource limit is exceeded. For more information, see Application - // Auto Scaling Limits (https://docs.aws.amazon.com/ApplicationAutoScaling/latest/userguide/application-auto-scaling-limits.html). - ErrCodeLimitExceededException = "LimitExceededException" - - // ErrCodeObjectNotFoundException for service response error code - // "ObjectNotFoundException". - // - // The specified object could not be found. For any operation that depends on - // the existence of a scalable target, this exception is thrown if the scalable - // target with the specified service namespace, resource ID, and scalable dimension - // does not exist. For any operation that deletes or deregisters a resource, - // this exception is thrown if the resource cannot be found. - ErrCodeObjectNotFoundException = "ObjectNotFoundException" - - // ErrCodeValidationException for service response error code - // "ValidationException". - // - // An exception was thrown for a validation issue. Review the available parameters - // for the API request. 
- ErrCodeValidationException = "ValidationException" -) diff --git a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go b/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go deleted file mode 100644 index e7e33c75389a3..0000000000000 --- a/vendor/github.com/aws/aws-sdk-go/service/applicationautoscaling/service.go +++ /dev/null @@ -1,103 +0,0 @@ -// Code generated by private/model/cli/gen-api/main.go. DO NOT EDIT. - -package applicationautoscaling - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/signer/v4" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -// ApplicationAutoScaling provides the API operation methods for making requests to -// Application Auto Scaling. See this package's package overview docs -// for details on the service. -// -// ApplicationAutoScaling methods are safe to use concurrently. It is not safe to -// mutate any of the struct's properties though. -type ApplicationAutoScaling struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// Service information constants -const ( - ServiceName = "autoscaling" // Name of service. - EndpointsID = "application-autoscaling" // ID to lookup a service endpoint with. - ServiceID = "Application Auto Scaling" // ServiceID is a unique identifier of a specific service. -) - -// New creates a new instance of the ApplicationAutoScaling client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// mySession := session.Must(session.NewSession()) -// -// // Create a ApplicationAutoScaling client from just a session. -// svc := applicationautoscaling.New(mySession) -// -// // Create a ApplicationAutoScaling client with additional configuration -// svc := applicationautoscaling.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *ApplicationAutoScaling { - c := p.ClientConfig(EndpointsID, cfgs...) - if c.SigningNameDerived || len(c.SigningName) == 0 { - c.SigningName = "application-autoscaling" - } - return newClient(*c.Config, c.Handlers, c.PartitionID, c.Endpoint, c.SigningRegion, c.SigningName) -} - -// newClient creates, initializes and returns a new service client instance. 
-func newClient(cfg aws.Config, handlers request.Handlers, partitionID, endpoint, signingRegion, signingName string) *ApplicationAutoScaling { - svc := &ApplicationAutoScaling{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - ServiceID: ServiceID, - SigningName: signingName, - SigningRegion: signingRegion, - PartitionID: partitionID, - Endpoint: endpoint, - APIVersion: "2016-02-06", - JSONVersion: "1.1", - TargetPrefix: "AnyScaleFrontendService", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBackNamed(v4.SignRequestHandler) - svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a ApplicationAutoScaling operation and runs any -// custom request initialization. -func (c *ApplicationAutoScaling) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/aws_autoscaling.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/aws_autoscaling.go deleted file mode 100644 index 2de3209329d8d..0000000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/aws_autoscaling.go +++ /dev/null @@ -1,226 +0,0 @@ -package aws - -import ( - "context" - "fmt" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/service/applicationautoscaling" - "github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface" - "github.com/go-kit/kit/log/level" - "github.com/prometheus/client_golang/prometheus" - "github.com/weaveworks/common/instrument" - - "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/util" -) - -const ( - autoScalingPolicyNamePrefix = "DynamoScalingPolicy_cortex_" -) - -var applicationAutoScalingRequestDuration = instrument.NewHistogramCollector(prometheus.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: "cortex", - Name: "application_autoscaling_request_duration_seconds", - Help: "Time spent doing ApplicationAutoScaling requests.", - - // AWS latency seems to range from a few ms to a few sec. So use 8 buckets - // from 128us to 2s. TODO: Confirm that this is the case for ApplicationAutoScaling. 
- Buckets: prometheus.ExponentialBuckets(0.000128, 4, 8), -}, []string{"operation", "status_code"})) - -func init() { - applicationAutoScalingRequestDuration.Register() -} - -type awsAutoscale struct { - call callManager - ApplicationAutoScaling applicationautoscalingiface.ApplicationAutoScalingAPI -} - -func newAWSAutoscale(cfg DynamoDBConfig, callManager callManager) (*awsAutoscale, error) { - session, err := awsSessionFromURL(cfg.ApplicationAutoScaling.URL) - if err != nil { - return nil, err - } - return &awsAutoscale{ - call: callManager, - ApplicationAutoScaling: applicationautoscaling.New(session), - }, nil -} - -func (a *awsAutoscale) PostCreateTable(ctx context.Context, desc chunk.TableDesc) error { - if desc.WriteScale.Enabled { - return a.enableAutoScaling(ctx, desc) - } - return nil -} - -func (a *awsAutoscale) DescribeTable(ctx context.Context, desc *chunk.TableDesc) error { - err := a.call.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, "ApplicationAutoScaling.DescribeScalableTargetsWithContext", applicationAutoScalingRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - out, err := a.ApplicationAutoScaling.DescribeScalableTargetsWithContext(ctx, &applicationautoscaling.DescribeScalableTargetsInput{ - ResourceIds: []*string{aws.String("table/" + desc.Name)}, - ScalableDimension: aws.String("dynamodb:table:WriteCapacityUnits"), - ServiceNamespace: aws.String("dynamodb"), - }) - if err != nil { - return err - } - switch l := len(out.ScalableTargets); l { - case 0: - return err - case 1: - desc.WriteScale.Enabled = true - if target := out.ScalableTargets[0]; target != nil { - if target.RoleARN != nil { - desc.WriteScale.RoleARN = *target.RoleARN - } - if target.MinCapacity != nil { - desc.WriteScale.MinCapacity = *target.MinCapacity - } - if target.MaxCapacity != nil { - desc.WriteScale.MaxCapacity = *target.MaxCapacity - } - } - return err - default: - return fmt.Errorf("more than one scalable target found for DynamoDB table") - } - }) - }) - if err != nil { - return err - } - - err = a.call.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, "ApplicationAutoScaling.DescribeScalingPoliciesWithContext", applicationAutoScalingRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - out, err := a.ApplicationAutoScaling.DescribeScalingPoliciesWithContext(ctx, &applicationautoscaling.DescribeScalingPoliciesInput{ - PolicyNames: []*string{aws.String(autoScalingPolicyNamePrefix + desc.Name)}, - ResourceId: aws.String("table/" + desc.Name), - ScalableDimension: aws.String("dynamodb:table:WriteCapacityUnits"), - ServiceNamespace: aws.String("dynamodb"), - }) - if err != nil { - return err - } - switch l := len(out.ScalingPolicies); l { - case 0: - return err - case 1: - config := out.ScalingPolicies[0].TargetTrackingScalingPolicyConfiguration - if config != nil { - if config.ScaleInCooldown != nil { - desc.WriteScale.InCooldown = *config.ScaleInCooldown - } - if config.ScaleOutCooldown != nil { - desc.WriteScale.OutCooldown = *config.ScaleOutCooldown - } - if config.TargetValue != nil { - desc.WriteScale.TargetValue = *config.TargetValue - } - } - return err - default: - return fmt.Errorf("more than one scaling policy found for DynamoDB table") - } - }) - }) - return err -} - -func (a *awsAutoscale) UpdateTable(ctx context.Context, current chunk.TableDesc, expected *chunk.TableDesc) error { - var err error - if !current.WriteScale.Enabled { - if 
expected.WriteScale.Enabled { - level.Info(util.Logger).Log("msg", "enabling autoscaling on table", "table", expected.Name) - err = a.enableAutoScaling(ctx, *expected) - } - } else { - if !expected.WriteScale.Enabled { - level.Info(util.Logger).Log("msg", "disabling autoscaling on table", "table", expected.Name) - err = a.disableAutoScaling(ctx, *expected) - } else if current.WriteScale != expected.WriteScale { - level.Info(util.Logger).Log("msg", "enabling autoscaling on table", "table", expected.Name) - err = a.enableAutoScaling(ctx, *expected) - } - } - return err -} - -func (a *awsAutoscale) enableAutoScaling(ctx context.Context, desc chunk.TableDesc) error { - // Registers or updates a scalable target - if err := a.call.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, "ApplicationAutoScaling.RegisterScalableTarget", applicationAutoScalingRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - input := &applicationautoscaling.RegisterScalableTargetInput{ - MinCapacity: aws.Int64(desc.WriteScale.MinCapacity), - MaxCapacity: aws.Int64(desc.WriteScale.MaxCapacity), - ResourceId: aws.String("table/" + desc.Name), - RoleARN: aws.String(desc.WriteScale.RoleARN), - ScalableDimension: aws.String("dynamodb:table:WriteCapacityUnits"), - ServiceNamespace: aws.String("dynamodb"), - } - _, err := a.ApplicationAutoScaling.RegisterScalableTarget(input) - if err != nil { - return err - } - return nil - }) - }); err != nil { - return err - } - - // Puts or updates a scaling policy - return a.call.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, "ApplicationAutoScaling.PutScalingPolicy", applicationAutoScalingRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - input := &applicationautoscaling.PutScalingPolicyInput{ - PolicyName: aws.String(autoScalingPolicyNamePrefix + desc.Name), - PolicyType: aws.String("TargetTrackingScaling"), - ResourceId: aws.String("table/" + desc.Name), - ScalableDimension: aws.String("dynamodb:table:WriteCapacityUnits"), - ServiceNamespace: aws.String("dynamodb"), - TargetTrackingScalingPolicyConfiguration: &applicationautoscaling.TargetTrackingScalingPolicyConfiguration{ - PredefinedMetricSpecification: &applicationautoscaling.PredefinedMetricSpecification{ - PredefinedMetricType: aws.String("DynamoDBWriteCapacityUtilization"), - }, - ScaleInCooldown: aws.Int64(desc.WriteScale.InCooldown), - ScaleOutCooldown: aws.Int64(desc.WriteScale.OutCooldown), - TargetValue: aws.Float64(desc.WriteScale.TargetValue), - }, - } - _, err := a.ApplicationAutoScaling.PutScalingPolicy(input) - return err - }) - }) -} - -func (a *awsAutoscale) disableAutoScaling(ctx context.Context, desc chunk.TableDesc) error { - // Deregister scalable target - if err := a.call.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, "ApplicationAutoScaling.DeregisterScalableTarget", applicationAutoScalingRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - input := &applicationautoscaling.DeregisterScalableTargetInput{ - ResourceId: aws.String("table/" + desc.Name), - ScalableDimension: aws.String("dynamodb:table:WriteCapacityUnits"), - ServiceNamespace: aws.String("dynamodb"), - } - _, err := a.ApplicationAutoScaling.DeregisterScalableTarget(input) - return err - }) - }); err != nil { - return err - } - - // Delete scaling policy - return a.call.backoffAndRetry(ctx, func(ctx context.Context) error { - return instrument.CollectedRequest(ctx, 
"ApplicationAutoScaling.DeleteScalingPolicy", applicationAutoScalingRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - input := &applicationautoscaling.DeleteScalingPolicyInput{ - PolicyName: aws.String(autoScalingPolicyNamePrefix + desc.Name), - ResourceId: aws.String("table/" + desc.Name), - ScalableDimension: aws.String("dynamodb:table:WriteCapacityUnits"), - ServiceNamespace: aws.String("dynamodb"), - } - _, err := a.ApplicationAutoScaling.DeleteScalingPolicy(input) - return err - }) - }) -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go index fa2b6679fdc96..a7b1e84f6df13 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go @@ -99,13 +99,12 @@ func init() { // DynamoDBConfig specifies config for a DynamoDB database. type DynamoDBConfig struct { - DynamoDB flagext.URLValue - APILimit float64 - ThrottleLimit float64 - ApplicationAutoScaling flagext.URLValue - Metrics MetricsAutoScalingConfig - ChunkGangSize int - ChunkGetMaxParallelism int + DynamoDB flagext.URLValue `yaml:"dynamodb_url"` + APILimit float64 `yaml:"api_limit"` + ThrottleLimit float64 `yaml:"throttle_limit"` + Metrics MetricsAutoScalingConfig `yaml:"metrics"` + ChunkGangSize int `yaml:"chunk_gang_size"` + ChunkGetMaxParallelism int `yaml:"chunk_get_max_parallelism"` backoffConfig util.BackoffConfig } @@ -115,9 +114,8 @@ func (cfg *DynamoDBConfig) RegisterFlags(f *flag.FlagSet) { "If only region is specified as a host, proper endpoint will be deduced. Use inmemory:/// to use a mock in-memory implementation.") f.Float64Var(&cfg.APILimit, "dynamodb.api-limit", 2.0, "DynamoDB table management requests per second limit.") f.Float64Var(&cfg.ThrottleLimit, "dynamodb.throttle-limit", 10.0, "DynamoDB rate cap to back off when throttled.") - f.Var(&cfg.ApplicationAutoScaling, "applicationautoscaling.url", "ApplicationAutoscaling endpoint URL with escaped Key and Secret encoded.") - f.IntVar(&cfg.ChunkGangSize, "dynamodb.chunk.gang.size", 10, "Number of chunks to group together to parallelise fetches (zero to disable)") - f.IntVar(&cfg.ChunkGetMaxParallelism, "dynamodb.chunk.get.max.parallelism", 32, "Max number of chunk-get operations to start in parallel") + f.IntVar(&cfg.ChunkGangSize, "dynamodb.chunk-gang-size", 10, "Number of chunks to group together to parallelise fetches (zero to disable)") + f.IntVar(&cfg.ChunkGetMaxParallelism, "dynamodb.chunk.get-max-parallelism", 32, "Max number of chunk-get operations to start in parallel") f.DurationVar(&cfg.backoffConfig.MinBackoff, "dynamodb.min-backoff", 100*time.Millisecond, "Minimum backoff time") f.DurationVar(&cfg.backoffConfig.MaxBackoff, "dynamodb.max-backoff", 50*time.Second, "Maximum backoff time") f.IntVar(&cfg.backoffConfig.MaxRetries, "dynamodb.max-retries", 20, "Maximum number of times to retry an operation") @@ -126,8 +124,8 @@ func (cfg *DynamoDBConfig) RegisterFlags(f *flag.FlagSet) { // StorageConfig specifies config for storing data on AWS. 
type StorageConfig struct { - DynamoDBConfig - S3Config `yaml:",inline"` + DynamoDBConfig `yaml:"dynamodb"` + S3Config `yaml:",inline"` } // RegisterFlags adds the flags required to config this to the given FlagSet diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go index 84ae8c3c22990..1e3ff1102fa47 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go @@ -50,13 +50,6 @@ func NewDynamoDBTableClient(cfg DynamoDBConfig) (chunk.TableClient, error) { } var autoscale autoscale - if cfg.ApplicationAutoScaling.URL != nil { - autoscale, err = newAWSAutoscale(cfg, callManager) - if err != nil { - return nil, err - } - } - if cfg.Metrics.URL != "" { autoscale, err = newMetrics(cfg) if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/fixtures.go index d0f41e4685b88..a9bf9dd96c65a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/fixtures.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/fixtures.go @@ -47,7 +47,8 @@ var Fixtures = []testutils.Fixture{ schemaCfg: schemaConfig, } object := objectclient.NewClient(&S3ObjectClient{ - S3: newMockS3(), + S3: newMockS3(), + delimiter: chunk.DirDelim, }, nil) return index, object, table, schemaConfig, nil }, diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go index 871516fdb38fd..3df859cbdcf5c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go @@ -41,17 +41,15 @@ const ( // MetricsAutoScalingConfig holds parameters to configure how it works type MetricsAutoScalingConfig struct { - URL string // URL to contact Prometheus store on - TargetQueueLen int64 // Queue length above which we will scale up capacity - ScaleUpFactor float64 // Scale up capacity by this multiple - MinThrottling float64 // Ignore throttling below this level - QueueLengthQuery string // Promql query to fetch ingester queue length - ThrottleQuery string // Promql query to fetch throttle rate per table - UsageQuery string // Promql query to fetch write capacity usage per table - ReadUsageQuery string // Promql query to fetch read usage per table - ReadErrorQuery string // Promql query to fetch read errors per table - - deprecatedErrorRateQuery string + URL string `yaml:"url"` // URL to contact Prometheus store on + TargetQueueLen int64 `yaml:"target_queue_length"` // Queue length above which we will scale up capacity + ScaleUpFactor float64 `yaml:"scale_up_factor"` // Scale up capacity by this multiple + MinThrottling float64 `yaml:"ignore_throttle_below"` // Ignore throttling below this level + QueueLengthQuery string `yaml:"queue_length_query"` // Promql query to fetch ingester queue length + ThrottleQuery string `yaml:"write_throttle_query"` // Promql query to fetch throttle rate per table + UsageQuery string `yaml:"write_usage_query"` // Promql query to fetch write capacity usage per table + ReadUsageQuery string `yaml:"read_usage_query"` // Promql query to fetch read usage per table + ReadErrorQuery string `yaml:"read_error_query"` // Promql query to fetch read errors per table } // RegisterFlags 
adds the flags required to config this to the given FlagSet @@ -65,8 +63,6 @@ func (cfg *MetricsAutoScalingConfig) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.UsageQuery, "metrics.usage-query", defaultUsageQuery, "query to fetch write capacity usage per table") f.StringVar(&cfg.ReadUsageQuery, "metrics.read-usage-query", defaultReadUsageQuery, "query to fetch read capacity usage per table") f.StringVar(&cfg.ReadErrorQuery, "metrics.read-error-query", defaultReadErrorQuery, "query to fetch read errors per table") - - f.StringVar(&cfg.deprecatedErrorRateQuery, "metrics.error-rate-query", "", "DEPRECATED: use -metrics.write-throttle-query instead") } type metricsData struct { @@ -83,10 +79,6 @@ type metricsData struct { } func newMetrics(cfg DynamoDBConfig) (*metricsData, error) { - if cfg.Metrics.deprecatedErrorRateQuery != "" { - level.Warn(util.Logger).Log("msg", "use of deprecated flag -metrics.error-rate-query") - cfg.Metrics.ThrottleQuery = cfg.Metrics.deprecatedErrorRateQuery - } client, err := promApi.NewClient(promApi.Config{Address: cfg.Metrics.URL}) if err != nil { return nil, err diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go index 7083d77321fd5..f9fc0c5401141 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go @@ -58,10 +58,11 @@ func (cfg *S3Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { type S3ObjectClient struct { bucketNames []string S3 s3iface.S3API + delimiter string } // NewS3ObjectClient makes a new S3-backed ObjectClient. -func NewS3ObjectClient(cfg S3Config) (*S3ObjectClient, error) { +func NewS3ObjectClient(cfg S3Config, delimiter string) (*S3ObjectClient, error) { if cfg.S3.URL == nil { return nil, fmt.Errorf("no URL specified for S3") } @@ -86,6 +87,7 @@ func NewS3ObjectClient(cfg S3Config) (*S3ObjectClient, error) { client := S3ObjectClient{ S3: s3Client, bucketNames: bucketNames, + delimiter: delimiter, } return &client, nil } @@ -175,7 +177,7 @@ func (a *S3ObjectClient) List(ctx context.Context, prefix string) ([]chunk.Stora input := s3.ListObjectsV2Input{ Bucket: aws.String(a.bucketNames[i]), Prefix: aws.String(prefix), - Delimiter: aws.String(chunk.DirDelim), + Delimiter: aws.String(a.delimiter), } for { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go index df4a337351656..e38ca50e572bb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go @@ -13,6 +13,7 @@ import ( "github.com/Azure/azure-storage-blob-go/azblob" "github.com/cortexproject/cortex/pkg/chunk" + "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" ) @@ -35,16 +36,21 @@ type BlobStorageConfig struct { // RegisterFlags adds the flags required to config this to the given FlagSet func (c *BlobStorageConfig) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&c.ContainerName, "azure.container-name", "cortex", "Name of the blob container used to store chunks. Defaults to `cortex`. 
This container must be created before running cortex.") - f.StringVar(&c.AccountName, "azure.account-name", "", "The Microsoft Azure account name to be used") - f.Var(&c.AccountKey, "azure.account-key", "The Microsoft Azure account key to use.") - f.DurationVar(&c.RequestTimeout, "azure.request-timeout", 30*time.Second, "Timeout for requests made against azure blob storage. Defaults to 30 seconds.") - f.IntVar(&c.DownloadBufferSize, "azure.download-buffer-size", 512000, "Preallocated buffer size for downloads (default is 512KB)") - f.IntVar(&c.UploadBufferSize, "azure.upload-buffer-size", 256000, "Preallocated buffer size for up;oads (default is 256KB)") - f.IntVar(&c.UploadBufferCount, "azure.download-buffer-count", 1, "Number of buffers used to used to upload a chunk. (defaults to 1)") - f.IntVar(&c.MaxRetries, "azure.max-retries", 5, "Number of retries for a request which times out.") - f.DurationVar(&c.MinRetryDelay, "azure.min-retry-delay", 10*time.Millisecond, "Minimum time to wait before retrying a request.") - f.DurationVar(&c.MaxRetryDelay, "azure.max-retry-delay", 500*time.Millisecond, "Maximum time to wait before retrying a request.") + c.RegisterFlagsWithPrefix("", f) +} + +// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet +func (c *BlobStorageConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&c.ContainerName, prefix+"azure.container-name", "cortex", "Name of the blob container used to store chunks. Defaults to `cortex`. This container must be created before running cortex.") + f.StringVar(&c.AccountName, prefix+"azure.account-name", "", "The Microsoft Azure account name to be used") + f.Var(&c.AccountKey, prefix+"azure.account-key", "The Microsoft Azure account key to use.") + f.DurationVar(&c.RequestTimeout, prefix+"azure.request-timeout", 30*time.Second, "Timeout for requests made against azure blob storage. Defaults to 30 seconds.") + f.IntVar(&c.DownloadBufferSize, prefix+"azure.download-buffer-size", 512000, "Preallocated buffer size for downloads (default is 512KB)") + f.IntVar(&c.UploadBufferSize, prefix+"azure.upload-buffer-size", 256000, "Preallocated buffer size for uploads (default is 256KB)") + f.IntVar(&c.UploadBufferCount, prefix+"azure.download-buffer-count", 1, "Number of buffers used to upload a chunk. (defaults to 1)") + f.IntVar(&c.MaxRetries, prefix+"azure.max-retries", 5, "Number of retries for a request which times out.") + f.DurationVar(&c.MinRetryDelay, prefix+"azure.min-retry-delay", 10*time.Millisecond, "Minimum time to wait before retrying a request.") + f.DurationVar(&c.MaxRetryDelay, prefix+"azure.max-retry-delay", 500*time.Millisecond, "Maximum time to wait before retrying a request.") } // BlobStorage is used to interact with azure blob storage for setting or getting time series chunks. @@ -53,11 +59,16 @@ type BlobStorage struct { //blobService storage.Serv cfg *BlobStorageConfig containerURL azblob.ContainerURL + delimiter string } // NewBlobStorage creates a new instance of the BlobStorage struct.
-func NewBlobStorage(cfg *BlobStorageConfig) (*BlobStorage, error) { - blobStorage := &BlobStorage{cfg: cfg} +func NewBlobStorage(cfg *BlobStorageConfig, delimiter string) (*BlobStorage, error) { + util.WarnExperimentalUse("Azure Blob Storage") + blobStorage := &BlobStorage{ + cfg: cfg, + delimiter: delimiter, + } var err error blobStorage.containerURL, err = blobStorage.buildContainerURL() @@ -165,7 +176,7 @@ func (b *BlobStorage) List(ctx context.Context, prefix string) ([]chunk.StorageO return nil, ctx.Err() } - listBlob, err := b.containerURL.ListBlobsHierarchySegment(ctx, marker, chunk.DirDelim, azblob.ListBlobsSegmentOptions{Prefix: prefix}) + listBlob, err := b.containerURL.ListBlobsHierarchySegment(ctx, marker, b.delimiter, azblob.ListBlobsSegmentOptions{Prefix: prefix}) if err != nil { return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go index 861d7e8160430..9ada6b61602f6 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/background.go @@ -26,14 +26,14 @@ var ( // BackgroundConfig is config for a Background Cache. type BackgroundConfig struct { - WriteBackGoroutines int `yaml:"writeback_goroutines,omitempty"` - WriteBackBuffer int `yaml:"writeback_buffer,omitempty"` + WriteBackGoroutines int `yaml:"writeback_goroutines"` + WriteBackBuffer int `yaml:"writeback_buffer"` } // RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet func (cfg *BackgroundConfig) RegisterFlagsWithPrefix(prefix string, description string, f *flag.FlagSet) { - f.IntVar(&cfg.WriteBackGoroutines, prefix+"memcache.write-back-goroutines", 10, description+"How many goroutines to use to write back to memcache.") - f.IntVar(&cfg.WriteBackBuffer, prefix+"memcache.write-back-buffer", 10000, description+"How many key batches to buffer for background write-back.") + f.IntVar(&cfg.WriteBackGoroutines, prefix+"background.write-back-concurrency", 10, description+"At what concurrency to write back to cache.") + f.IntVar(&cfg.WriteBackBuffer, prefix+"background.write-back-buffer", 10000, description+"How many key batches to buffer for background write-back.") } type backgroundCache struct { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go index 5f2d0a4642fe2..f47be182cd469 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/cache.go @@ -5,6 +5,8 @@ import ( "errors" "flag" "time" + + "github.com/prometheus/client_golang/prometheus" ) // Cache byte arrays by key. @@ -22,18 +24,18 @@ type Cache interface { // Config for building Caches. 
type Config struct { - EnableFifoCache bool `yaml:"enable_fifocache,omitempty"` + EnableFifoCache bool `yaml:"enable_fifocache"` - DefaultValidity time.Duration `yaml:"default_validity,omitempty"` + DefaultValidity time.Duration `yaml:"default_validity"` - Background BackgroundConfig `yaml:"background,omitempty"` - Memcache MemcachedConfig `yaml:"memcached,omitempty"` - MemcacheClient MemcachedClientConfig `yaml:"memcached_client,omitempty"` - Redis RedisConfig `yaml:"redis,omitempty"` - Fifocache FifoCacheConfig `yaml:"fifocache,omitempty"` + Background BackgroundConfig `yaml:"background"` + Memcache MemcachedConfig `yaml:"memcached"` + MemcacheClient MemcachedClientConfig `yaml:"memcached_client"` + Redis RedisConfig `yaml:"redis"` + Fifocache FifoCacheConfig `yaml:"fifocache"` // This is to name the cache metrics properly. - Prefix string `yaml:"prefix,omitempty" doc:"hidden"` + Prefix string `yaml:"prefix" doc:"hidden"` // For tests to inject specific implementations. Cache Cache `yaml:"-"` @@ -70,16 +72,16 @@ func New(cfg Config) (Cache, error) { caches = append(caches, Instrument(cfg.Prefix+"fifocache", cache)) } - if cfg.MemcacheClient.Host != "" && cfg.Redis.Endpoint != "" { + if (cfg.MemcacheClient.Host != "" || cfg.MemcacheClient.Addresses != "") && cfg.Redis.Endpoint != "" { return nil, errors.New("use of multiple cache storage systems is not supported") } - if cfg.MemcacheClient.Host != "" { + if cfg.MemcacheClient.Host != "" || cfg.MemcacheClient.Addresses != "" { if cfg.Memcache.Expiration == 0 && cfg.DefaultValidity != 0 { cfg.Memcache.Expiration = cfg.DefaultValidity } - client := NewMemcachedClient(cfg.MemcacheClient) + client := NewMemcachedClient(cfg.MemcacheClient, cfg.Prefix, prometheus.DefaultRegisterer) cache := NewMemcached(cfg.Memcache, client, cfg.Prefix) cacheName := cfg.Prefix + "memcache" diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go index 9eb3cd87ec876..507def7418744 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go @@ -9,6 +9,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + + "github.com/cortexproject/cortex/pkg/util" ) var ( @@ -71,8 +73,8 @@ var ( // FifoCacheConfig holds config for the FifoCache. type FifoCacheConfig struct { - Size int `yaml:"size,omitempty"` - Validity time.Duration `yaml:"validity,omitempty"` + Size int `yaml:"size"` + Validity time.Duration `yaml:"validity"` } // RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet @@ -113,6 +115,8 @@ type cacheEntry struct { // NewFifoCache returns a new initialised FifoCache of size. // TODO(bwplotka): Fix metrics, get them out of globals, separate or allow prefixing. 
func NewFifoCache(name string, cfg FifoCacheConfig) *FifoCache { + util.WarnExperimentalUse("In-memory (FIFO) cache") + cache := &FifoCache{ size: cfg.Size, validity: cfg.Validity, diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go index b56a9206c1bdc..0b14180e11f48 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go @@ -41,16 +41,16 @@ func (o observableVecCollector) After(method, statusCode string, start time.Time // MemcachedConfig is config to make a Memcached type MemcachedConfig struct { - Expiration time.Duration `yaml:"expiration,omitempty"` + Expiration time.Duration `yaml:"expiration"` - BatchSize int `yaml:"batch_size,omitempty"` - Parallelism int `yaml:"parallelism,omitempty"` + BatchSize int `yaml:"batch_size"` + Parallelism int `yaml:"parallelism"` } // RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet func (cfg *MemcachedConfig) RegisterFlagsWithPrefix(prefix, description string, f *flag.FlagSet) { f.DurationVar(&cfg.Expiration, prefix+"memcached.expiration", 0, description+"How long keys stay in the memcache.") - f.IntVar(&cfg.BatchSize, prefix+"memcached.batchsize", 0, description+"How many keys to fetch in each batch.") + f.IntVar(&cfg.BatchSize, prefix+"memcached.batchsize", 1024, description+"How many keys to fetch in each batch.") f.IntVar(&cfg.Parallelism, prefix+"memcached.parallelism", 100, description+"Maximum active requests to memcache.") } @@ -149,8 +149,8 @@ func (c *Memcached) Fetch(ctx context.Context, keys []string) (found []string, b func (c *Memcached) fetch(ctx context.Context, keys []string) (found []string, bufs [][]byte, missed []string) { var items map[string]*memcache.Item - err := instr.CollectedRequest(ctx, "Memcache.GetMulti", c.requestDuration, memcacheStatusCode, func(_ context.Context) error { - sp := opentracing.SpanFromContext(ctx) + err := instr.CollectedRequest(ctx, "Memcache.GetMulti", c.requestDuration, memcacheStatusCode, func(innerCtx context.Context) error { + sp := opentracing.SpanFromContext(innerCtx) sp.LogFields(otlog.Int("keys requested", len(keys))) var err error diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go index 2712a584bc62e..adcd25b9f5980 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go @@ -1,19 +1,32 @@ package cache import ( + "context" "flag" "fmt" "net" "sort" + "strings" "sync" "time" "github.com/bradfitz/gomemcache/memcache" "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/thanos-io/thanos/pkg/discovery/dns" "github.com/cortexproject/cortex/pkg/util" ) +var ( + memcacheServersDiscovered = promauto.NewGaugeVec(prometheus.GaugeOpts{ + Namespace: "cortex", + Name: "memcache_client_servers", + Help: "The number of memcache servers discovered.", + }, []string{"name"}) +) + // MemcachedClient interface exists for mocking memcacheClient. 
type MemcachedClient interface { GetMulti(keys []string) (map[string]*memcache.Item, error) @@ -30,36 +43,44 @@ type serverSelector interface { type memcachedClient struct { *memcache.Client serverList serverSelector - hostname string - service string + + hostname string + service string + + addresses []string + provider *dns.Provider quit chan struct{} wait sync.WaitGroup + + numServers prometheus.Gauge } // MemcachedClientConfig defines how a MemcachedClient should be constructed. type MemcachedClientConfig struct { - Host string `yaml:"host,omitempty"` - Service string `yaml:"service,omitempty"` - Timeout time.Duration `yaml:"timeout,omitempty"` - MaxIdleConns int `yaml:"max_idle_conns,omitempty"` - UpdateInterval time.Duration `yaml:"update_interval,omitempty"` - ConsistentHash bool `yaml:"consistent_hash,omitempty"` + Host string `yaml:"host"` + Service string `yaml:"service"` + Addresses string `yaml:"addresses"` // EXPERIMENTAL. + Timeout time.Duration `yaml:"timeout"` + MaxIdleConns int `yaml:"max_idle_conns"` + UpdateInterval time.Duration `yaml:"update_interval"` + ConsistentHash bool `yaml:"consistent_hash"` } // RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet func (cfg *MemcachedClientConfig) RegisterFlagsWithPrefix(prefix, description string, f *flag.FlagSet) { - f.StringVar(&cfg.Host, prefix+"memcached.hostname", "", description+"Hostname for memcached service to use when caching chunks. If empty, no memcached will be used.") + f.StringVar(&cfg.Host, prefix+"memcached.hostname", "", description+"Hostname for memcached service to use. If empty and if addresses is unset, no memcached will be used.") f.StringVar(&cfg.Service, prefix+"memcached.service", "memcached", description+"SRV service used to discover memcache servers.") + f.StringVar(&cfg.Addresses, prefix+"memcached.addresses", "", description+"EXPERIMENTAL: Comma separated addresses list in DNS Service Discovery format: https://cortexmetrics.io/docs/configuration/arguments/#dns-service-discovery") f.IntVar(&cfg.MaxIdleConns, prefix+"memcached.max-idle-conns", 16, description+"Maximum number of idle connections in pool.") f.DurationVar(&cfg.Timeout, prefix+"memcached.timeout", 100*time.Millisecond, description+"Maximum time to wait before giving up on memcached requests.") f.DurationVar(&cfg.UpdateInterval, prefix+"memcached.update-interval", 1*time.Minute, description+"Period with which to poll DNS for memcache servers.") - f.BoolVar(&cfg.ConsistentHash, prefix+"memcached.consistent-hash", false, description+"Use consistent hashing to distribute to memcache servers.") + f.BoolVar(&cfg.ConsistentHash, prefix+"memcached.consistent-hash", true, description+"Use consistent hashing to distribute to memcache servers.") } // NewMemcachedClient creates a new MemcacheClient that gets its server list // from SRV and updates the server list on a regular basis. 
-func NewMemcachedClient(cfg MemcachedClientConfig) MemcachedClient { +func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Registerer) MemcachedClient { var selector serverSelector if cfg.ConsistentHash { selector = &MemcachedJumpHashSelector{} @@ -71,13 +92,26 @@ func NewMemcachedClient(cfg MemcachedClientConfig) MemcachedClient { client.Timeout = cfg.Timeout client.MaxIdleConns = cfg.MaxIdleConns + dnsProviderRegisterer := prometheus.WrapRegistererWithPrefix("cortex_", prometheus.WrapRegistererWith(prometheus.Labels{ + "name": name, + }, r)) + newClient := &memcachedClient{ Client: client, serverList: selector, hostname: cfg.Host, service: cfg.Service, + provider: dns.NewProvider(util.Logger, dnsProviderRegisterer, dns.GolangResolverType), quit: make(chan struct{}), + + numServers: memcacheServersDiscovered.WithLabelValues(name), + } + + if len(cfg.Addresses) > 0 { + util.WarnExperimentalUse("DNS-based memcached service discovery") + newClient.addresses = strings.Split(cfg.Addresses, ",") } + err := newClient.updateMemcacheServers() if err != nil { level.Error(util.Logger).Log("msg", "error setting memcache servers to host", "host", cfg.Host, "err", err) @@ -114,17 +148,28 @@ func (c *memcachedClient) updateLoop(updateInterval time.Duration) { // updateMemcacheServers sets a memcache server list from SRV records. SRV // priority & weight are ignored. func (c *memcachedClient) updateMemcacheServers() error { - _, addrs, err := net.LookupSRV(c.service, "tcp", c.hostname) - if err != nil { - return err - } var servers []string - for _, srv := range addrs { - servers = append(servers, fmt.Sprintf("%s:%d", srv.Target, srv.Port)) + + if len(c.addresses) > 0 { + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + c.provider.Resolve(ctx, c.addresses) + servers = c.provider.Addresses() + } else { + _, addrs, err := net.LookupSRV(c.service, "tcp", c.hostname) + if err != nil { + return err + } + for _, srv := range addrs { + servers = append(servers, fmt.Sprintf("%s:%d", srv.Target, srv.Port)) + } } + // ServerList deterministically maps keys to _index_ of the server list. // Since DNS returns records in different order each time, we sort to // guarantee best possible match between nodes. sort.Strings(servers) + c.numServers.Set(float64(len(servers))) return c.serverList.SetServers(servers...) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/mock.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/mock.go index e44be6cbf37e7..6503aea80dc0c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/mock.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/mock.go @@ -36,9 +36,14 @@ func (m *mockCache) Fetch(ctx context.Context, keys []string) (found []string, b func (m *mockCache) Stop() { } -// NewMockCache makes a new MockCache +// NewMockCache makes a new MockCache. func NewMockCache() Cache { return &mockCache{ cache: map[string][]byte{}, } } + +// NewNoopCache returns a no-op cache. 
+func NewNoopCache() Cache { + return NewTiered(nil) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go index 7ab48d2c67d76..2325531d5060f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go @@ -22,11 +22,11 @@ type RedisCache struct { // RedisConfig defines how a RedisCache should be constructed. type RedisConfig struct { - Endpoint string `yaml:"endpoint,omitempty"` - Timeout time.Duration `yaml:"timeout,omitempty"` - Expiration time.Duration `yaml:"expiration,omitempty"` - MaxIdleConns int `yaml:"max_idle_conns,omitempty"` - MaxActiveConns int `yaml:"max_active_conns,omitempty"` + Endpoint string `yaml:"endpoint"` + Timeout time.Duration `yaml:"timeout"` + Expiration time.Duration `yaml:"expiration"` + MaxIdleConns int `yaml:"max_idle_conns"` + MaxActiveConns int `yaml:"max_active_conns"` Password flagext.Secret `yaml:"password"` EnableTLS bool `yaml:"enable_tls"` } @@ -44,6 +44,7 @@ func (cfg *RedisConfig) RegisterFlagsWithPrefix(prefix, description string, f *f // NewRedisCache creates a new RedisCache func NewRedisCache(cfg RedisConfig, name string, pool *redis.Pool) *RedisCache { + util.WarnExperimentalUse("Redis cache") // pool != nil only in unit tests if pool == nil { pool = &redis.Pool{ diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go index f3aac2f86fd8e..6a5307d6d160a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go @@ -14,27 +14,28 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/util" + pkgutil "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" ) // Config for a StorageClient type Config struct { - Addresses string `yaml:"addresses,omitempty"` - Port int `yaml:"port,omitempty"` - Keyspace string `yaml:"keyspace,omitempty"` - Consistency string `yaml:"consistency,omitempty"` - ReplicationFactor int `yaml:"replication_factor,omitempty"` - DisableInitialHostLookup bool `yaml:"disable_initial_host_lookup,omitempty"` - SSL bool `yaml:"SSL,omitempty"` - HostVerification bool `yaml:"host_verification,omitempty"` - CAPath string `yaml:"CA_path,omitempty"` - Auth bool `yaml:"auth,omitempty"` - Username string `yaml:"username,omitempty"` - Password flagext.Secret `yaml:"password,omitempty"` - PasswordFile string `yaml:"password_file,omitempty"` + Addresses string `yaml:"addresses"` + Port int `yaml:"port"` + Keyspace string `yaml:"keyspace"` + Consistency string `yaml:"consistency"` + ReplicationFactor int `yaml:"replication_factor"` + DisableInitialHostLookup bool `yaml:"disable_initial_host_lookup"` + SSL bool `yaml:"SSL"` + HostVerification bool `yaml:"host_verification"` + CAPath string `yaml:"CA_path"` + Auth bool `yaml:"auth"` + Username string `yaml:"username"` + Password flagext.Secret `yaml:"password"` + PasswordFile string `yaml:"password_file"` CustomAuthenticators flagext.StringSlice `yaml:"custom_authenticators"` - Timeout time.Duration `yaml:"timeout,omitempty"` - ConnectTimeout time.Duration `yaml:"connect_timeout,omitempty"` + Timeout time.Duration `yaml:"timeout"` + ConnectTimeout time.Duration `yaml:"connect_timeout"` Retries 
int `yaml:"max_retries"` MaxBackoff time.Duration `yaml:"retry_max_backoff"` MinBackoff time.Duration `yaml:"retry_min_backoff"` @@ -185,6 +186,8 @@ type StorageClient struct { // NewStorageClient returns a new StorageClient. func NewStorageClient(cfg Config, schemaCfg chunk.SchemaConfig) (*StorageClient, error) { + pkgutil.WarnExperimentalUse("Cassandra Backend") + session, err := cfg.session() if err != nil { return nil, errors.WithStack(err) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go index 2f9b95e78aecc..ccd45b7395d82 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go @@ -21,7 +21,6 @@ import ( "github.com/cortexproject/cortex/pkg/chunk/cache" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/extract" - "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -47,30 +46,29 @@ var ( // StoreConfig specifies config for a ChunkStore type StoreConfig struct { - ChunkCacheConfig cache.Config `yaml:"chunk_cache_config,omitempty"` - WriteDedupeCacheConfig cache.Config `yaml:"write_dedupe_cache_config,omitempty"` + ChunkCacheConfig cache.Config `yaml:"chunk_cache_config"` + WriteDedupeCacheConfig cache.Config `yaml:"write_dedupe_cache_config"` - CacheLookupsOlderThan time.Duration `yaml:"cache_lookups_older_than,omitempty"` + CacheLookupsOlderThan time.Duration `yaml:"cache_lookups_older_than"` // Limits query start time to be greater than now() - MaxLookBackPeriod, if set. MaxLookBackPeriod time.Duration `yaml:"max_look_back_period"` - // Not visible in yaml because the setting shouldn't be common between ingesters and queriers + // Not visible in yaml because the setting shouldn't be common between ingesters and queriers. + // This exists in case we don't want to cache all the chunks but still want to take advantage of + // ingester chunk write deduplication. But for the queriers we need the full value. So when this option + // is set, use different caches for ingesters and queriers. chunkCacheStubs bool // don't write the full chunk to cache, just a stub entry } // RegisterFlags adds the flags required to config this to the given FlagSet func (cfg *StoreConfig) RegisterFlags(f *flag.FlagSet) { - cfg.ChunkCacheConfig.RegisterFlagsWithPrefix("", "Cache config for chunks. ", f) - f.BoolVar(&cfg.chunkCacheStubs, "store.chunk-cache-stubs", false, "If true, don't write the full chunk to cache, just a stub entry.") + cfg.ChunkCacheConfig.RegisterFlagsWithPrefix("store.chunks-cache.", "Cache config for chunks. ", f) + f.BoolVar(&cfg.chunkCacheStubs, "store.chunks-cache.cache-stubs", false, "If true, don't write the full chunk to cache, just a stub entry.") cfg.WriteDedupeCacheConfig.RegisterFlagsWithPrefix("store.index-cache-write.", "Cache config for index entry writing. ", f) f.DurationVar(&cfg.CacheLookupsOlderThan, "store.cache-lookups-older-than", 0, "Cache index entries older than this period. 0 to disable.") f.DurationVar(&cfg.MaxLookBackPeriod, "store.max-look-back-period", 0, "Limit how long back data can be queried") - - // Deprecated. - flagext.DeprecatedFlag(f, "store.cardinality-cache-size", "DEPRECATED. 
Use store.index-cache-read.enable-fifocache and store.index-cache-read.fifocache.size instead.") - flagext.DeprecatedFlag(f, "store.cardinality-cache-validity", "DEPRECATED. Use store.index-cache-read.enable-fifocache and store.index-cache-read.fifocache.duration instead.") } // store implements Store @@ -84,8 +82,8 @@ type store struct { *Fetcher } -func newStore(cfg StoreConfig, schema Schema, index IndexClient, chunks Client, limits StoreLimits) (Store, error) { - fetcher, err := NewChunkFetcher(cfg.ChunkCacheConfig, cfg.chunkCacheStubs, chunks) +func newStore(cfg StoreConfig, schema Schema, index IndexClient, chunks Client, limits StoreLimits, chunksCache cache.Cache) (Store, error) { + fetcher, err := NewChunkFetcher(chunksCache, cfg.chunkCacheStubs, chunks) if err != nil { return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go index 27a5a84fe97cb..78cd7c14fe49e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go @@ -99,16 +99,10 @@ type decodeResponse struct { } // NewChunkFetcher makes a new ChunkFetcher. -func NewChunkFetcher(cfg cache.Config, cacheStubs bool, storage Client) (*Fetcher, error) { - cfg.Prefix = "chunks" - cache, err := cache.New(cfg) - if err != nil { - return nil, err - } - +func NewChunkFetcher(cacher cache.Cache, cacheStubs bool, storage Client) (*Fetcher, error) { c := &Fetcher{ storage: storage, - cache: cache, + cache: cacher, cacheStubs: cacheStubs, decodeRequests: make(chan decodeRequest), } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go index fdb30d339fa32..366ca1de86dca 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go @@ -7,6 +7,8 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" + + "github.com/cortexproject/cortex/pkg/chunk/cache" ) // StoreLimits helps get Limits specific to Queries for Stores @@ -56,15 +58,15 @@ func NewCompositeStore() CompositeStore { } // AddPeriod adds the configuration for a period of time to the CompositeStore -func (c *CompositeStore) AddPeriod(storeCfg StoreConfig, cfg PeriodConfig, index IndexClient, chunks Client, limits StoreLimits) error { +func (c *CompositeStore) AddPeriod(storeCfg StoreConfig, cfg PeriodConfig, index IndexClient, chunks Client, limits StoreLimits, chunksCache, writeDedupeCache cache.Cache) error { schema := cfg.CreateSchema() var store Store var err error switch cfg.Schema { case "v9", "v10", "v11": - store, err = newSeriesStore(storeCfg, schema, index, chunks, limits) + store, err = newSeriesStore(storeCfg, schema, index, chunks, limits, chunksCache, writeDedupeCache) default: - store, err = newStore(storeCfg, schema, index, chunks, limits) + store, err = newStore(storeCfg, schema, index, chunks, limits, chunksCache) } if err != nil { return err diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go index 807d738a38be8..d15b0213b0d4a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/fixtures.go @@ -35,7 +35,7 @@ var BenchmarkLabels = labels.Labels{ // DefaultSchemaConfig 
creates a simple schema config for testing func DefaultSchemaConfig(store, schema string, from model.Time) SchemaConfig { - return SchemaConfig{ + s := SchemaConfig{ Configs: []PeriodConfig{{ IndexType: store, Schema: schema, @@ -50,6 +50,10 @@ func DefaultSchemaConfig(store, schema string, from model.Time) SchemaConfig { }, }}, } + if err := s.Validate(); err != nil { + panic(err) + } + return s } // ChunksToMatrix converts a set of chunks to a model.Matrix. diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go index c163e529f1c71..39a7f44c8788d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go @@ -40,8 +40,8 @@ type Config struct { ColumnKey bool `yaml:"-"` DistributeKeys bool `yaml:"-"` - TableCacheEnabled bool - TableCacheExpiration time.Duration + TableCacheEnabled bool `yaml:"table_cache_enabled"` + TableCacheExpiration time.Duration `yaml:"table_cache_expiration"` } // RegisterFlags adds the flags required to config this to the given FlagSet diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/fixtures.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/fixtures.go index 0275e7ebe8a9f..123d279ab47ac 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/fixtures.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/fixtures.go @@ -79,7 +79,7 @@ func (f *fixture) Clients() ( if f.gcsObjectClient { cClient = objectclient.NewClient(newGCSObjectClient(GCSConfig{ BucketName: "chunks", - }, f.gcssrv.Client()), nil) + }, f.gcssrv.Client(), chunk.DirDelim), nil) } else { cClient = newBigtableObjectClient(Config{}, schemaConfig, client) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go index f8703ef69e3c6..49c497c7367dc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/gcs_object_client.go @@ -13,9 +13,10 @@ import ( ) type GCSObjectClient struct { - cfg GCSConfig - client *storage.Client - bucket *storage.BucketHandle + cfg GCSConfig + client *storage.Client + bucket *storage.BucketHandle + delimiter string } // GCSConfig is config for the GCS Chunk Client. @@ -38,7 +39,7 @@ func (cfg *GCSConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { } // NewGCSObjectClient makes a new chunk.Client that writes chunks to GCS. 
-func NewGCSObjectClient(ctx context.Context, cfg GCSConfig) (*GCSObjectClient, error) { +func NewGCSObjectClient(ctx context.Context, cfg GCSConfig, delimiter string) (*GCSObjectClient, error) { option, err := gcsInstrumentation(ctx, storage.ScopeReadWrite) if err != nil { return nil, err @@ -48,15 +49,16 @@ func NewGCSObjectClient(ctx context.Context, cfg GCSConfig) (*GCSObjectClient, e if err != nil { return nil, err } - return newGCSObjectClient(cfg, client), nil + return newGCSObjectClient(cfg, client, delimiter), nil } -func newGCSObjectClient(cfg GCSConfig, client *storage.Client) *GCSObjectClient { +func newGCSObjectClient(cfg GCSConfig, client *storage.Client, delimiter string) *GCSObjectClient { bucket := client.Bucket(cfg.BucketName) return &GCSObjectClient{ - cfg: cfg, - client: client, - bucket: bucket, + cfg: cfg, + client: client, + bucket: bucket, + delimiter: delimiter, } } @@ -109,7 +111,7 @@ func (s *GCSObjectClient) PutObject(ctx context.Context, objectKey string, objec func (s *GCSObjectClient) List(ctx context.Context, prefix string) ([]chunk.StorageObject, error) { var storageObjects []chunk.StorageObject - iter := s.bucket.Objects(ctx, &storage.Query{Prefix: prefix, Delimiter: chunk.DirDelim}) + iter := s.bucket.Objects(ctx, &storage.Query{Prefix: prefix, Delimiter: s.delimiter}) for { if ctx.Err() != nil { return nil, ctx.Err() diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go index b969181f966c5..9441d42e23800 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go @@ -75,6 +75,8 @@ type DataPurger struct { // NewDataPurger creates a new DataPurger func NewDataPurger(cfg Config, deleteStore *DeleteStore, chunkStore chunk.Store, storageClient chunk.ObjectClient) (*DataPurger, error) { + util.WarnExperimentalUse("Delete series API") + dataPurger := DataPurger{ cfg: cfg, deleteStore: deleteStore, diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go index d98f361cb99a3..28bf1b8b8275e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go @@ -14,6 +14,7 @@ import ( yaml "gopkg.in/yaml.v2" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/flagext" ) const ( @@ -36,7 +37,7 @@ type PeriodConfig struct { ObjectType string `yaml:"object_store"` // type of object client to use; if omitted, defaults to store. 
Schema string `yaml:"schema"` IndexTables PeriodicTableConfig `yaml:"index"` - ChunkTables PeriodicTableConfig `yaml:"chunks,omitempty"` + ChunkTables PeriodicTableConfig `yaml:"chunks"` RowShards uint32 `yaml:"row_shards"` } @@ -86,6 +87,7 @@ func (cfg *SchemaConfig) loadFromFile() error { cfg.fileName = cfg.legacyFileName if cfg.legacyFileName != "" { + flagext.DeprecatedFlagsUsed.Inc() level.Warn(util.Logger).Log("msg", "running with DEPRECATED flag -config-yaml, use -schema-config-file instead") } } @@ -107,7 +109,9 @@ func (cfg *SchemaConfig) loadFromFile() error { // Validate the schema config and returns an error if the validation // doesn't pass func (cfg *SchemaConfig) Validate() error { - for _, periodCfg := range cfg.Configs { + for i := range cfg.Configs { + periodCfg := &cfg.Configs[i] + periodCfg.applyDefaults() if err := periodCfg.validate(); err != nil { return err } @@ -142,10 +146,6 @@ func (cfg *SchemaConfig) ForEachAfter(t model.Time, f func(config *PeriodConfig) // CreateSchema returns the schema defined by the PeriodConfig func (cfg PeriodConfig) CreateSchema() Schema { - rowShards := defaultRowShards(cfg.Schema) - if cfg.RowShards > 0 { - rowShards = cfg.RowShards - } var e entries switch cfg.Schema { @@ -165,12 +165,12 @@ func (cfg PeriodConfig) CreateSchema() Schema { e = v9Entries{} case "v10": e = v10Entries{ - rowShards: rowShards, + rowShards: cfg.RowShards, } case "v11": e = v11Entries{ v10Entries: v10Entries{ - rowShards: rowShards, + rowShards: cfg.RowShards, }, } default: @@ -191,7 +191,13 @@ func (cfg PeriodConfig) createBucketsFunc() (schemaBucketsFunc, time.Duration) { } } -// validate the period config +func (cfg *PeriodConfig) applyDefaults() { + if cfg.RowShards == 0 { + cfg.RowShards = defaultRowShards(cfg.Schema) + } +} + +// Validate the period config. func (cfg PeriodConfig) validate() error { // Ensure the schema version exists schema := cfg.CreateSchema() @@ -210,6 +216,17 @@ func (cfg PeriodConfig) validate() error { return errInvalidTablePeriod } + switch cfg.Schema { + case "v1", "v2", "v3", "v4", "v5", "v6", "v9": + case "v10", "v11": + if cfg.RowShards == 0 { + return fmt.Errorf("Must have row_shards > 0 (current: %d) for schema (%s)", cfg.RowShards, cfg.Schema) + } + default: + // This generally unreachable path protects us from adding schemas and not handling them in this function. + return fmt.Errorf("unexpected schema (%s)", cfg.Schema) + } + return nil } @@ -330,13 +347,13 @@ func (cfg PeriodicTableConfig) MarshalYAML() (interface{}, error) { // AutoScalingConfig for DynamoDB tables. type AutoScalingConfig struct { - Enabled bool `yaml:"enabled,omitempty"` - RoleARN string `yaml:"role_arn,omitempty"` - MinCapacity int64 `yaml:"min_capacity,omitempty"` - MaxCapacity int64 `yaml:"max_capacity,omitempty"` - OutCooldown int64 `yaml:"out_cooldown,omitempty"` - InCooldown int64 `yaml:"in_cooldown,omitempty"` - TargetValue float64 `yaml:"target,omitempty"` + Enabled bool `yaml:"enabled"` + RoleARN string `yaml:"role_arn"` + MinCapacity int64 `yaml:"min_capacity"` + MaxCapacity int64 `yaml:"max_capacity"` + OutCooldown int64 `yaml:"out_cooldown"` + InCooldown int64 `yaml:"in_cooldown"` + TargetValue float64 `yaml:"target"` } // RegisterFlags adds the flags required to config this to the given FlagSet. 
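The schema_config.go hunk above changes when row-shard defaults are applied: Validate() now normalises each period config via applyDefaults() before validating it, and v10/v11 schemas must end up with row_shards > 0. A minimal sketch of how a caller might exercise this path, assuming the exported chunk package API shown in the diff (the concrete default shard count is not shown above and is an assumption; the start time is illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/cortexproject/cortex/pkg/chunk"
	"github.com/prometheus/common/model"
)

func main() {
	cfg := chunk.SchemaConfig{
		Configs: []chunk.PeriodConfig{{
			From:      chunk.DayTime{Time: model.Now()}, // hypothetical period start
			IndexType: "aws-dynamo",
			Schema:    "v10",
			// RowShards left at zero on purpose: Validate() applies the
			// schema default via applyDefaults() before enforcing
			// row_shards > 0 for v10/v11 schemas.
		}},
	}
	if err := cfg.Validate(); err != nil {
		log.Fatalf("invalid schema config: %v", err)
	}
	fmt.Println("row shards after defaulting:", cfg.Configs[0].RowShards)
}

This also explains why DefaultSchemaConfig in fixtures.go now panics on a failed Validate(): defaults are only applied through the validation path, so test fixtures must pass through it too.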
diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go index 16f5fb2fe331d..7d99e0349c5cb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/series_store.go @@ -68,13 +68,8 @@ type seriesStore struct { writeDedupeCache cache.Cache } -func newSeriesStore(cfg StoreConfig, schema Schema, index IndexClient, chunks Client, limits StoreLimits) (Store, error) { - fetcher, err := NewChunkFetcher(cfg.ChunkCacheConfig, cfg.chunkCacheStubs, chunks) - if err != nil { - return nil, err - } - - writeDedupeCache, err := cache.New(cfg.WriteDedupeCacheConfig) +func newSeriesStore(cfg StoreConfig, schema Schema, index IndexClient, chunks Client, limits StoreLimits, chunksCache, writeDedupeCache cache.Cache) (Store, error) { + fetcher, err := NewChunkFetcher(chunksCache, cfg.chunkCacheStubs, chunks) if err != nil { return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go index e46fffee3d661..ef74d02ebf9d2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go @@ -59,11 +59,11 @@ type Config struct { BoltDBConfig local.BoltDBConfig `yaml:"boltdb"` FSConfig local.FSConfig `yaml:"filesystem"` - IndexCacheValidity time.Duration + IndexCacheValidity time.Duration `yaml:"index_cache_validity"` - IndexQueriesCacheConfig cache.Config `yaml:"index_queries_cache_config,omitempty"` + IndexQueriesCacheConfig cache.Config `yaml:"index_queries_cache_config"` - DeleteStoreConfig purger.DeleteStoreConfig `yaml:"delete_store,omitempty"` + DeleteStoreConfig purger.DeleteStoreConfig `yaml:"delete_store"` } // RegisterFlags adds the flags required to configure this flag set. @@ -95,14 +95,28 @@ func (cfg *Config) Validate() error { // NewStore makes the storage clients based on the configuration. func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConfig, limits StoreLimits) (chunk.Store, error) { - tieredCache, err := cache.New(cfg.IndexQueriesCacheConfig) + indexReadCache, err := cache.New(cfg.IndexQueriesCacheConfig) + if err != nil { + return nil, err + } + + writeDedupeCache, err := cache.New(storeCfg.WriteDedupeCacheConfig) + if err != nil { + return nil, err + } + + chunkCacheCfg := storeCfg.ChunkCacheConfig + chunkCacheCfg.Prefix = "chunks" + chunksCache, err := cache.New(chunkCacheCfg) if err != nil { return nil, err } // Cache is shared by multiple stores, which means they will try and Stop // it more than once. Wrap in a StopOnce to prevent this. 
- tieredCache = cache.StopOnce(tieredCache) + indexReadCache = cache.StopOnce(indexReadCache) + chunksCache = cache.StopOnce(chunksCache) + writeDedupeCache = cache.StopOnce(writeDedupeCache) err = schemaCfg.Load() if err != nil { @@ -115,7 +129,7 @@ func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConf if err != nil { return nil, errors.Wrap(err, "error creating index client") } - index = newCachingIndexClient(index, tieredCache, cfg.IndexCacheValidity, limits) + index = newCachingIndexClient(index, indexReadCache, cfg.IndexCacheValidity, limits) objectStoreType := s.ObjectType if objectStoreType == "" { @@ -126,7 +140,7 @@ func NewStore(cfg Config, storeCfg chunk.StoreConfig, schemaCfg chunk.SchemaConf return nil, errors.Wrap(err, "error creating object client") } - err = stores.AddPeriod(storeCfg, s, index, chunks, limits) + err = stores.AddPeriod(storeCfg, s, index, chunks, limits, chunksCache, writeDedupeCache) if err != nil { return nil, err } @@ -176,7 +190,7 @@ func NewChunkClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chun case "inmemory": return chunk.NewMockStorage(), nil case "aws", "s3": - return newChunkClientFromStore(aws.NewS3ObjectClient(cfg.AWSStorageConfig.S3Config)) + return newChunkClientFromStore(aws.NewS3ObjectClient(cfg.AWSStorageConfig.S3Config, chunk.DirDelim)) case "aws-dynamo": if cfg.AWSStorageConfig.DynamoDB.URL == nil { return nil, fmt.Errorf("Must set -dynamodb.url in aws mode") @@ -187,13 +201,13 @@ func NewChunkClient(name string, cfg Config, schemaCfg chunk.SchemaConfig) (chun } return aws.NewDynamoDBChunkClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg) case "azure": - return newChunkClientFromStore(azure.NewBlobStorage(&cfg.AzureStorageConfig)) + return newChunkClientFromStore(azure.NewBlobStorage(&cfg.AzureStorageConfig, chunk.DirDelim)) case "gcp": return gcp.NewBigtableObjectClient(context.Background(), cfg.GCPStorageConfig, schemaCfg) case "gcp-columnkey", "bigtable", "bigtable-hashed": return gcp.NewBigtableObjectClient(context.Background(), cfg.GCPStorageConfig, schemaCfg) case "gcs": - return newChunkClientFromStore(gcp.NewGCSObjectClient(context.Background(), cfg.GCSConfig)) + return newChunkClientFromStore(gcp.NewGCSObjectClient(context.Background(), cfg.GCSConfig, chunk.DirDelim)) case "cassandra": return cassandra.NewStorageClient(cfg.CassandraStorageConfig, schemaCfg) case "filesystem": diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go index baee41e74bbed..7d188496f06b6 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go @@ -40,15 +40,15 @@ func newTableManagerMetrics(r prometheus.Registerer) *tableManagerMetrics { m := tableManagerMetrics{} m.syncTableDuration = prometheus.NewHistogramVec(prometheus.HistogramOpts{ Namespace: "cortex", - Name: "dynamo_sync_tables_seconds", - Help: "Time spent doing SyncTables.", + Name: "table_manager_sync_duration_seconds", + Help: "Time spent synching tables.", Buckets: prometheus.DefBuckets, }, []string{"operation", "status_code"}) m.tableCapacity = prometheus.NewGaugeVec(prometheus.GaugeOpts{ Namespace: "cortex", - Name: "dynamo_table_capacity_units", - Help: "Per-table DynamoDB capacity, measured in DynamoDB capacity units.", + Name: "table_capacity_units", + Help: "Per-table capacity, measured in DynamoDB capacity units.", }, []string{"op", "table"}) 
m.createFailures = prometheus.NewGauge(prometheus.GaugeOpts{ @@ -95,7 +95,7 @@ type TableManagerConfig struct { RetentionPeriodModel model.Duration `yaml:"retention_period"` // Period with which the table manager will poll for tables. - DynamoDBPollInterval time.Duration `yaml:"dynamodb_poll_interval"` + PollInterval time.Duration `yaml:"poll_interval"` // duration a table will be created before it is needed. CreationGracePeriod time.Duration `yaml:"creation_grace_period"` @@ -139,12 +139,12 @@ func (cfg *TableManagerConfig) Validate() error { return nil } -// ProvisionConfig holds config for provisioning capacity (on DynamoDB) +// ProvisionConfig holds config for provisioning capacity (on DynamoDB for now) type ProvisionConfig struct { - ProvisionedThroughputOnDemandMode bool `yaml:"provisioned_throughput_on_demand_mode"` + ProvisionedThroughputOnDemandMode bool `yaml:"enable_ondemand_throughput_mode"` ProvisionedWriteThroughput int64 `yaml:"provisioned_write_throughput"` ProvisionedReadThroughput int64 `yaml:"provisioned_read_throughput"` - InactiveThroughputOnDemandMode bool `yaml:"inactive_throughput_on_demand_mode"` + InactiveThroughputOnDemandMode bool `yaml:"enable_inactive_throughput_on_demand_mode"` InactiveWriteThroughput int64 `yaml:"inactive_write_throughput"` InactiveReadThroughput int64 `yaml:"inactive_read_throughput"` @@ -161,21 +161,21 @@ func (cfg *TableManagerConfig) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.ThroughputUpdatesDisabled, "table-manager.throughput-updates-disabled", false, "If true, disable all changes to DB capacity") f.BoolVar(&cfg.RetentionDeletesEnabled, "table-manager.retention-deletes-enabled", false, "If true, enables retention deletes of DB tables") f.Var(&cfg.RetentionPeriodModel, "table-manager.retention-period", "Tables older than this retention period are deleted. Note: This setting is destructive to data!(default: 0, which disables deletion)") - f.DurationVar(&cfg.DynamoDBPollInterval, "dynamodb.poll-interval", 2*time.Minute, "How frequently to poll DynamoDB to learn our capacity.") - f.DurationVar(&cfg.CreationGracePeriod, "dynamodb.periodic-table.grace-period", 10*time.Minute, "DynamoDB periodic tables grace period (duration which table will be created/deleted before/after it's needed).") + f.DurationVar(&cfg.PollInterval, "table-manager.poll-interval", 2*time.Minute, "How frequently to poll backend to learn our capacity.") + f.DurationVar(&cfg.CreationGracePeriod, "table-manager.periodic-table.grace-period", 10*time.Minute, "Periodic tables grace period (duration which table will be created/deleted before/after it's needed).") - cfg.IndexTables.RegisterFlags("dynamodb.periodic-table", f) - cfg.ChunkTables.RegisterFlags("dynamodb.chunk-table", f) + cfg.IndexTables.RegisterFlags("table-manager.index-table", f) + cfg.ChunkTables.RegisterFlags("table-manager.chunk-table", f) } // RegisterFlags adds the flags required to config this to the given FlagSet. func (cfg *ProvisionConfig) RegisterFlags(argPrefix string, f *flag.FlagSet) { - f.Int64Var(&cfg.ProvisionedWriteThroughput, argPrefix+".write-throughput", 1000, "DynamoDB table default write throughput.") - f.Int64Var(&cfg.ProvisionedReadThroughput, argPrefix+".read-throughput", 300, "DynamoDB table default read throughput.") - f.BoolVar(&cfg.ProvisionedThroughputOnDemandMode, argPrefix+".enable-ondemand-throughput-mode", false, "Enables on demand throughput provisioning for the storage provider (if supported). 
Applies only to tables which are not autoscaled") - f.Int64Var(&cfg.InactiveWriteThroughput, argPrefix+".inactive-write-throughput", 1, "DynamoDB table write throughput for inactive tables.") - f.Int64Var(&cfg.InactiveReadThroughput, argPrefix+".inactive-read-throughput", 300, "DynamoDB table read throughput for inactive tables.") - f.BoolVar(&cfg.InactiveThroughputOnDemandMode, argPrefix+".inactive-enable-ondemand-throughput-mode", false, "Enables on demand throughput provisioning for the storage provider (if supported). Applies only to tables which are not autoscaled") + f.Int64Var(&cfg.ProvisionedWriteThroughput, argPrefix+".write-throughput", 1000, "Table default write throughput. Supported by DynamoDB") + f.Int64Var(&cfg.ProvisionedReadThroughput, argPrefix+".read-throughput", 300, "Table default read throughput. Supported by DynamoDB") + f.BoolVar(&cfg.ProvisionedThroughputOnDemandMode, argPrefix+".enable-ondemand-throughput-mode", false, "Enables on demand throughput provisioning for the storage provider (if supported). Applies only to tables which are not autoscaled. Supported by DynamoDB") + f.Int64Var(&cfg.InactiveWriteThroughput, argPrefix+".inactive-write-throughput", 1, "Table write throughput for inactive tables. Supported by DynamoDB") + f.Int64Var(&cfg.InactiveReadThroughput, argPrefix+".inactive-read-throughput", 300, "Table read throughput for inactive tables. Supported by DynamoDB") + f.BoolVar(&cfg.InactiveThroughputOnDemandMode, argPrefix+".inactive-enable-ondemand-throughput-mode", false, "Enables on demand throughput provisioning for the storage provider (if supported). Applies only to tables which are not autoscaled. Supported by DynamoDB") cfg.WriteScale.RegisterFlags(argPrefix+".write-throughput.scale", f) cfg.InactiveWriteScale.RegisterFlags(argPrefix+".inactive-write-throughput.scale", f) @@ -243,7 +243,7 @@ func (m *TableManager) stopping(_ error) error { } func (m *TableManager) loop(ctx context.Context) error { - ticker := time.NewTicker(m.cfg.DynamoDBPollInterval) + ticker := time.NewTicker(m.cfg.PollInterval) defer ticker.Stop() if err := instrument.CollectedRequest(context.Background(), "TableManager.SyncTables", instrument.NewHistogramCollector(m.metrics.syncTableDuration), instrument.ErrorCode, func(ctx context.Context) error { @@ -254,7 +254,7 @@ func (m *TableManager) loop(ctx context.Context) error { // Sleep for a bit to spread the sync load across different times if the tablemanagers are all started at once. 
select { - case <-time.After(time.Duration(rand.Int63n(int64(m.cfg.DynamoDBPollInterval)))): + case <-time.After(time.Duration(rand.Int63n(int64(m.cfg.PollInterval)))): case <-ctx.Done(): return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go index 92dd0dcaf4594..4124a9f629714 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/testutils/testutils.go @@ -12,6 +12,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/cortexproject/cortex/pkg/chunk" + "github.com/cortexproject/cortex/pkg/chunk/cache" promchunk "github.com/cortexproject/cortex/pkg/chunk/encoding" "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/util/flagext" @@ -135,7 +136,7 @@ func SetupTestChunkStore() (chunk.Store, error) { flagext.DefaultValues(&storeCfg) store := chunk.NewCompositeStore() - err = store.AddPeriod(storeCfg, schemaCfg.Configs[0], storage, storage, overrides) + err = store.AddPeriod(storeCfg, schemaCfg.Configs[0], storage, storage, overrides, cache.NewNoopCache(), cache.NewNoopCache()) if err != nil { return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go index 45e3cb1dffc1d..0f6051a5a0869 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go @@ -25,7 +25,6 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/extract" - "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/limiter" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/validation" @@ -120,18 +119,18 @@ type Distributor struct { // Config contains the configuration require to // create a Distributor type Config struct { - PoolConfig ingester_client.PoolConfig `yaml:"pool,omitempty"` + PoolConfig ingester_client.PoolConfig `yaml:"pool"` - HATrackerConfig HATrackerConfig `yaml:"ha_tracker,omitempty"` + HATrackerConfig HATrackerConfig `yaml:"ha_tracker"` MaxRecvMsgSize int `yaml:"max_recv_msg_size"` - RemoteTimeout time.Duration `yaml:"remote_timeout,omitempty"` - ExtraQueryDelay time.Duration `yaml:"extra_queue_delay,omitempty"` + RemoteTimeout time.Duration `yaml:"remote_timeout"` + ExtraQueryDelay time.Duration `yaml:"extra_queue_delay"` - ShardByAllLabels bool `yaml:"shard_by_all_labels,omitempty"` + ShardByAllLabels bool `yaml:"shard_by_all_labels"` // Distributors ring - DistributorRing RingConfig `yaml:"ring,omitempty"` + DistributorRing RingConfig `yaml:"ring"` // for testing ingesterClientFactory client.Factory `yaml:"-"` @@ -146,7 +145,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.MaxRecvMsgSize, "distributor.max-recv-msg-size", 100<<20, "remote_write API max receive message size (bytes).") f.DurationVar(&cfg.RemoteTimeout, "distributor.remote-timeout", 2*time.Second, "Timeout for downstream ingesters.") f.DurationVar(&cfg.ExtraQueryDelay, "distributor.extra-query-delay", 0, "Time to wait before sending more than the minimum successful query requests.") - flagext.DeprecatedFlag(f, "distributor.limiter-reload-period", "DEPRECATED. 
No more required because the local limiter is reconfigured as soon as the overrides change.") f.BoolVar(&cfg.ShardByAllLabels, "distributor.shard-by-all-labels", false, "Distribute samples based on all labels, as opposed to solely by user and metric name.") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go index fc6e4422b5704..3251cdc7c45fa 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go @@ -18,9 +18,9 @@ import ( // is used to strip down the config to the minimum, and avoid confusion // to the user. type RingConfig struct { - KVStore kv.Config `yaml:"kvstore,omitempty"` - HeartbeatPeriod time.Duration `yaml:"heartbeat_period,omitempty"` - HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout,omitempty"` + KVStore kv.Config `yaml:"kvstore"` + HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` + HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` // Instance details InstanceID string `yaml:"instance_id" doc:"hidden"` diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go index 363f16f8da8df..5f664c9c534f5 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go @@ -82,7 +82,7 @@ type haTracker struct { // HATrackerConfig contains the configuration require to // create a HA Tracker. type HATrackerConfig struct { - EnableHATracker bool `yaml:"enable_ha_tracker,omitempty"` + EnableHATracker bool `yaml:"enable_ha_tracker"` // We should only update the timestamp if the difference // between the stored timestamp and the time we received a sample at // is more than this duration. diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/pool.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/pool.go index 70d55f09d25a8..9468ebf6f08cc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/pool.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/pool.go @@ -31,15 +31,15 @@ type Factory func(addr string) (grpc_health_v1.HealthClient, error) // PoolConfig is config for creating a Pool. type PoolConfig struct { - ClientCleanupPeriod time.Duration `yaml:"client_cleanup_period,omitempty"` - HealthCheckIngesters bool `yaml:"health_check_ingesters,omitempty"` + ClientCleanupPeriod time.Duration `yaml:"client_cleanup_period"` + HealthCheckIngesters bool `yaml:"health_check_ingesters"` RemoteTimeout time.Duration `yaml:"-"` } // RegisterFlags adds the flags required to config this to the given FlagSet. func (cfg *PoolConfig) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.ClientCleanupPeriod, "distributor.client-cleanup-period", 15*time.Second, "How frequently to clean up clients for ingesters that have gone away.") - f.BoolVar(&cfg.HealthCheckIngesters, "distributor.health-check-ingesters", false, "Run a health check on each ingester client during periodic cleanup.") + f.BoolVar(&cfg.HealthCheckIngesters, "distributor.health-check-ingesters", true, "Run a health check on each ingester client during periodic cleanup.") } // Pool holds a cache of grpc_health_v1 clients. 
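One upgrade-sensitive detail in the pool.go hunk above: the default for -distributor.health-check-ingesters flips from false to true. A minimal sketch of how the new default surfaces through flag registration (the flag-set name is illustrative):

package main

import (
	"flag"
	"fmt"

	"github.com/cortexproject/cortex/pkg/ingester/client"
)

func main() {
	fs := flag.NewFlagSet("example", flag.ContinueOnError)

	var cfg client.PoolConfig
	cfg.RegisterFlags(fs)

	// Parsing no arguments leaves every flag at its default value.
	_ = fs.Parse(nil)

	// Prints "true" after this change; operators who relied on the old
	// behaviour must now pass -distributor.health-check-ingesters=false.
	fmt.Println("health-check ingesters:", cfg.HealthCheckIngesters)
}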
diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/shard_summer.go b/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/shard_summer.go index 7a01084a1420b..45d251e1d2cb1 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/shard_summer.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/shard_summer.go @@ -8,7 +8,6 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql" ) @@ -37,20 +36,16 @@ type shardSummer struct { } // NewShardSummer instantiates an ASTMapper which will fan out sum queries by shard -func NewShardSummer(shards int, squasher squasher, registerer prometheus.Registerer) (ASTMapper, error) { +func NewShardSummer(shards int, squasher squasher, shardedQueries prometheus.Counter) (ASTMapper, error) { if squasher == nil { return nil, errors.Errorf("squasher required and not passed") } return NewASTNodeMapper(&shardSummer{ - shards: shards, - squash: squasher, - currentShard: nil, - shardedQueries: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ - Namespace: "cortex", - Name: "frontend_sharded_queries_total", - Help: "Total number of sharded queries", - }), + shards: shards, + squash: squasher, + currentShard: nil, + shardedQueries: shardedQueries, }), nil } @@ -205,11 +200,20 @@ func (summer *shardSummer) splitSum( ) } - summer.shardedQueries.Add(float64(summer.shards)) + summer.recordShards(float64(summer.shards)) return parent, children, nil } +// ShardSummer is explicitly passed a prometheus.Counter during construction +// in order to prevent duplicate metric registrations (ShardSummers are created per request). +// recordShards guards against calling a nil counter (a nil counter is common in tests). +func (summer *shardSummer) recordShards(n float64) { + if summer.shardedQueries != nil { + summer.shardedQueries.Add(n) + } +} + func shardVectorSelector(curshard, shards int, selector *promql.VectorSelector) (promql.Node, error) { shardMatcher, err := labels.NewMatcher(labels.MatchEqual, ShardLabel, fmt.Sprintf(ShardLabelFmt, curshard, shards)) if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go index 9cdd243a685b4..f388954ab2c98 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/frontend.go @@ -40,7 +40,7 @@ var ( type Config struct { MaxOutstandingPerTenant int `yaml:"max_outstanding_per_tenant"` CompressResponses bool `yaml:"compress_responses"` - DownstreamURL string `yaml:"downstream"` + DownstreamURL string `yaml:"downstream_url"` LogQueriesLongerThan time.Duration `yaml:"log_queries_longer_than"` } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go index a20bb3eeeafb6..6315d9acbe6bf 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/frontend/worker.go @@ -31,16 +31,16 @@ var ( // WorkerConfig is config for a worker.
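// The yaml tags added to WorkerConfig below make the worker configurable from
// a config file. Illustrative YAML, with key names taken from the tags; the
// enclosing frontend_worker block name is an assumption, not shown in this diff:
//
//   frontend_worker:
//     frontend_address: "query-frontend:9095"
//     parallelism: 10
//     dns_lookup_duration: 10s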
type WorkerConfig struct { - Address string - Parallelism int - DNSLookupDuration time.Duration + Address string `yaml:"frontend_address"` + Parallelism int `yaml:"parallelism"` + DNSLookupDuration time.Duration `yaml:"dns_lookup_duration"` GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"` } // RegisterFlags adds the flags required to config this to the given FlagSet. func (cfg *WorkerConfig) RegisterFlags(f *flag.FlagSet) { - f.StringVar(&cfg.Address, "querier.frontend-address", "", "Address of query frontend service.") + f.StringVar(&cfg.Address, "querier.frontend-address", "", "Address of query frontend service, in host:port format.") f.IntVar(&cfg.Parallelism, "querier.worker-parallelism", 10, "Number of simultaneous queries to process.") f.DurationVar(&cfg.DNSLookupDuration, "querier.dns-lookup-period", 10*time.Second, "How often to query DNS.") diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/querysharding.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/querysharding.go index 21e9283cb6772..b67722c4fe687 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/querysharding.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/querysharding.go @@ -136,8 +136,9 @@ type astMapperware struct { next Handler // Metrics. - registerer prometheus.Registerer - mappedASTCounter prometheus.Counter + registerer prometheus.Registerer + mappedASTCounter prometheus.Counter + shardedQueriesCounter prometheus.Counter } func newASTMapperware(confs ShardingConfigs, next Handler, logger log.Logger, registerer prometheus.Registerer) *astMapperware { @@ -151,6 +152,11 @@ func newASTMapperware(confs ShardingConfigs, next Handler, logger log.Logger, re Name: "frontend_mapped_asts_total", Help: "Total number of queries that have undergone AST mapping", }), + shardedQueriesCounter: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Namespace: "cortex", + Name: "frontend_sharded_queries_total", + Help: "Total number of sharded queries", + }), } } @@ -162,7 +168,7 @@ func (ast *astMapperware) Do(ctx context.Context, r Request) (Response, error) { return ast.next.Do(ctx, r) } - shardSummer, err := astmapper.NewShardSummer(int(conf.RowShards), astmapper.VectorSquasher, ast.registerer) + shardSummer, err := astmapper.NewShardSummer(int(conf.RowShards), astmapper.VectorSquasher, ast.shardedQueriesCounter) if err != nil { return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/http.go b/vendor/github.com/cortexproject/cortex/pkg/ring/http.go index 12fea72d0967b..1b10c1a891bb9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/http.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/http.go @@ -30,6 +30,7 @@ const pageContent = ` Instance ID + Availability Zone State Address Last Heartbeat @@ -46,6 +47,7 @@ const pageContent = ` {{ end }} {{ .ID }} + {{ .Zone }} {{ .State }} {{ .Address }} {{ .Timestamp }} @@ -138,16 +140,17 @@ func (r *Ring) ServeHTTP(w http.ResponseWriter, req *http.Request) { } ingesters = append(ingesters, struct { - ID, State, Address, Timestamp string - Tokens []uint32 - NumTokens int - Ownership float64 + ID, State, Address, Timestamp, Zone string + Tokens []uint32 + NumTokens int + Ownership float64 }{ ID: id, State: state, Address: ing.Addr, Timestamp: timestamp.String(), Tokens: ing.Tokens, + Zone: ing.Zone, NumTokens: len(ing.Tokens), Ownership: (float64(owned[id]) / float64(math.MaxUint32)) * 100, }) diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go index 1c729fe15dfbe..f18fb455d7161 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/client.go @@ -22,9 +22,9 @@ var inmemoryStore Client // Consul, Etcd, Memberlist or MultiClient. It was extracted from Config to keep // single-client config separate from final client-config (with all the wrappers) type StoreConfig struct { - Consul consul.Config `yaml:"consul,omitempty"` - Etcd etcd.Config `yaml:"etcd,omitempty"` - Multi MultiConfig `yaml:"multi,omitempty"` + Consul consul.Config `yaml:"consul"` + Etcd etcd.Config `yaml:"etcd"` + Multi MultiConfig `yaml:"multi"` // Function that returns memberlist.KV store to use. By using a function, we can delay // initialization of memberlist.KV until it is actually required. @@ -34,8 +34,8 @@ type StoreConfig struct { // Config is config for a KVStore currently used by ring and HA tracker, // where store can be consul or inmemory. type Config struct { - Store string `yaml:"store,omitempty"` - Prefix string `yaml:"prefix,omitempty"` + Store string `yaml:"store"` + Prefix string `yaml:"prefix"` StoreConfig `yaml:",inline"` Mock Client `yaml:"-"` diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go index 92457274b6402..3fdc24c543649 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go @@ -35,12 +35,12 @@ var ( // Config to create a ConsulClient type Config struct { - Host string - ACLToken string - HTTPClientTimeout time.Duration - ConsistentReads bool - WatchKeyRateLimit float64 // Zero disables rate limit - WatchKeyBurstSize int // Burst when doing rate-limit, defaults to 1 + Host string `yaml:"host"` + ACLToken string `yaml:"acl_token"` + HTTPClientTimeout time.Duration `yaml:"http_client_timeout"` + ConsistentReads bool `yaml:"consistent_reads"` + WatchKeyRateLimit float64 `yaml:"watch_rate_limit"` // Zero disables rate limit + WatchKeyBurstSize int `yaml:"watch_burst_size"` // Burst when doing rate-limit, defaults to 1 } type kv interface { @@ -61,10 +61,10 @@ type Client struct { // If prefix is not an empty string it should end with a period. func (cfg *Config) RegisterFlags(f *flag.FlagSet, prefix string) { f.StringVar(&cfg.Host, prefix+"consul.hostname", "localhost:8500", "Hostname and port of Consul.") - f.StringVar(&cfg.ACLToken, prefix+"consul.acltoken", "", "ACL Token used to interact with Consul.") + f.StringVar(&cfg.ACLToken, prefix+"consul.acl-token", "", "ACL Token used to interact with Consul.") f.DurationVar(&cfg.HTTPClientTimeout, prefix+"consul.client-timeout", 2*longPollDuration, "HTTP timeout when talking to Consul") - f.BoolVar(&cfg.ConsistentReads, prefix+"consul.consistent-reads", true, "Enable consistent reads to Consul.") - f.Float64Var(&cfg.WatchKeyRateLimit, prefix+"consul.watch-rate-limit", 0, "Rate limit when watching key or prefix in Consul, in requests per second. 0 disables the rate limit.") + f.BoolVar(&cfg.ConsistentReads, prefix+"consul.consistent-reads", false, "Enable consistent reads to Consul.") + f.Float64Var(&cfg.WatchKeyRateLimit, prefix+"consul.watch-rate-limit", 1, "Rate limit when watching key or prefix in Consul, in requests per second. 
0 disables the rate limit.") f.IntVar(&cfg.WatchKeyBurstSize, prefix+"consul.watch-burst-size", 1, "Burst size used in rate limit. Values less than 1 are treated as 1.") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go index 4a973a46cf285..3a75b197cf463 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go @@ -41,6 +41,23 @@ func New(cfg Config, codec codec.Codec) (*Client, error) { cli, err := clientv3.New(clientv3.Config{ Endpoints: cfg.Endpoints, DialTimeout: cfg.DialTimeout, + // Configure the keepalive to make sure that the client reconnects + // to the etcd service endpoint(s) in case the current connection is + // dead (ie. the node where etcd is running is dead or a network + // partition occurs). + // + // The settings: + // - DialKeepAliveTime: time before the client pings the server to + // see if transport is alive (10s hardcoded) + // - DialKeepAliveTimeout: time the client waits for a response for + // the keep-alive probe (set to 2x dial timeout, in order to avoid + // exposing another config option which is likely to be a factor of + // the dial timeout anyway) + // - PermitWithoutStream: whether the client should send keepalive pings + // to server without any active streams (enabled) + DialKeepAliveTime: 10 * time.Second, + DialKeepAliveTimeout: 2 * cfg.DialTimeout, + PermitWithoutStream: true, }) if err != nil { return nil, err diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go index c0ed7056d70b2..e6e56f7c39ca9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go @@ -189,11 +189,11 @@ var ( errTooManyRetries = errors.New("too many retries") ) -// NewMemberlistClient creates new Client instance. If cfg.JoinMembers is set, it will also try to connect +// NewKV creates new Client instance. If cfg.JoinMembers is set, it will also try to connect // to these members and join the cluster. If that fails and AbortIfJoinFails is true, error is returned and no // client is created. func NewKV(cfg KVConfig) (*KV, error) { - level.Warn(util.Logger).Log("msg", "Using memberlist-based KV store is EXPERIMENTAL and not tested in production") + util.WarnExperimentalUse("Gossip memberlist ring") cfg.TCPTransport.MetricsRegisterer = cfg.MetricsRegisterer cfg.TCPTransport.MetricsNamespace = cfg.MetricsNamespace diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go index 62d6ca6352235..1b90f5f151f91 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go @@ -43,7 +43,7 @@ var ( // LifecyclerConfig is the config to build a Lifecycler. 
type LifecyclerConfig struct { - RingConfig Config `yaml:"ring,omitempty"` + RingConfig Config `yaml:"ring"` // Config for the ingester lifecycle control ListenPort *int `yaml:"-"` @@ -55,16 +55,13 @@ type LifecyclerConfig struct { InfNames []string `yaml:"interface_names"` FinalSleep time.Duration `yaml:"final_sleep"` TokensFilePath string `yaml:"tokens_file_path"` + Zone string `yaml:"availability_zone"` // For testing, you can override the address and ID of this ingester Addr string `yaml:"address" doc:"hidden"` Port int `doc:"hidden"` ID string `doc:"hidden"` SkipUnregister bool `yaml:"-"` - - // graveyard for unused flags. - UnusedFlag bool `yaml:"claim_on_rollout,omitempty"` // DEPRECATED - left for backwards-compatibility - UnusedFlag2 bool `yaml:"normalise_tokens,omitempty"` // DEPRECATED - left for backwards-compatibility } // RegisterFlags adds the flags required to config this to the given FlagSet @@ -87,8 +84,6 @@ func (cfg *LifecyclerConfig) RegisterFlagsWithPrefix(prefix string, f *flag.Flag f.DurationVar(&cfg.JoinAfter, prefix+"join-after", 0*time.Second, "Period to wait for a claim from another member; will join automatically after this.") f.DurationVar(&cfg.ObservePeriod, prefix+"observe-period", 0*time.Second, "Observe tokens after generating to resolve collisions. Useful when using gossiping ring.") f.DurationVar(&cfg.MinReadyDuration, prefix+"min-ready-duration", 1*time.Minute, "Minimum duration to wait before becoming ready. This is to work around race conditions with ingesters exiting and updating the ring.") - flagext.DeprecatedFlag(f, prefix+"claim-on-rollout", "DEPRECATED. This feature is no longer optional.") - flagext.DeprecatedFlag(f, prefix+"normalise-tokens", "DEPRECATED. This feature is no longer optional.") f.DurationVar(&cfg.FinalSleep, prefix+"final-sleep", 30*time.Second, "Duration to sleep for before exiting, to ensure metrics are scraped.") f.StringVar(&cfg.TokensFilePath, prefix+"tokens-file-path", "", "File path where tokens are stored. If empty, tokens are not stored at shutdown and restored at startup.") @@ -103,6 +98,7 @@ func (cfg *LifecyclerConfig) RegisterFlagsWithPrefix(prefix string, f *flag.Flag f.StringVar(&cfg.Addr, prefix+"lifecycler.addr", "", "IP address to advertise in consul.") f.IntVar(&cfg.Port, prefix+"lifecycler.port", 0, "port to advertise in consul (defaults to server.grpc-listen-port).") f.StringVar(&cfg.ID, prefix+"lifecycler.ID", hostname, "ID to register into consul.") + f.StringVar(&cfg.Zone, prefix+"availability-zone", "", "The availability zone of the host this instance is running on. Default is the lifecycler ID.") } // Lifecycler is responsible for managing the lifecycle of entries in the ring. @@ -120,6 +116,7 @@ type Lifecycler struct { Addr string RingName string RingKey string + Zone string // Whether to flush if transfer fails on shutdown.
flushOnShutdown bool @@ -160,6 +157,15 @@ func NewLifecycler(cfg LifecyclerConfig, flushTransferer FlushTransferer, ringNa return nil, err } + zone := cfg.Zone + if zone != "" { + util.WarnExperimentalUse("Zone aware replication") + } + + if zone == "" { + zone = cfg.ID + } + // We do allow a nil FlushTransferer, but to keep the ring logic easier we assume // it's always set, so we use a noop FlushTransferer if flushTransferer == nil { @@ -176,6 +182,7 @@ func NewLifecycler(cfg LifecyclerConfig, flushTransferer FlushTransferer, ringNa RingName: ringName, RingKey: ringKey, flushOnShutdown: flushOnShutdown, + Zone: zone, actorChan: make(chan func()), @@ -502,14 +509,14 @@ func (i *Lifecycler) initRing(ctx context.Context) error { if len(tokensFromFile) >= i.cfg.NumTokens { i.setState(ACTIVE) } - ringDesc.AddIngester(i.ID, i.Addr, tokensFromFile, i.GetState()) + ringDesc.AddIngester(i.ID, i.Addr, i.Zone, tokensFromFile, i.GetState()) i.setTokens(tokensFromFile) return ringDesc, true, nil } // Either we are a new ingester, or consul must have restarted level.Info(util.Logger).Log("msg", "instance not found in ring, adding with no tokens", "ring", i.RingName) - ringDesc.AddIngester(i.ID, i.Addr, []uint32{}, i.GetState()) + ringDesc.AddIngester(i.ID, i.Addr, i.Zone, []uint32{}, i.GetState()) return ringDesc, true, nil } @@ -564,7 +571,7 @@ func (i *Lifecycler) verifyTokens(ctx context.Context) bool { ringTokens = append(ringTokens, newTokens...) sort.Sort(ringTokens) - ringDesc.AddIngester(i.ID, i.Addr, ringTokens, i.GetState()) + ringDesc.AddIngester(i.ID, i.Addr, i.Zone, ringTokens, i.GetState()) i.setTokens(ringTokens) @@ -626,7 +633,7 @@ func (i *Lifecycler) autoJoin(ctx context.Context, targetState IngesterState) er sort.Sort(myTokens) i.setTokens(myTokens) - ringDesc.AddIngester(i.ID, i.Addr, i.getTokens(), i.GetState()) + ringDesc.AddIngester(i.ID, i.Addr, i.Zone, i.getTokens(), i.GetState()) return ringDesc, true, nil }) @@ -655,7 +662,7 @@ func (i *Lifecycler) updateConsul(ctx context.Context) error { if !ok { // consul must have restarted level.Info(util.Logger).Log("msg", "found empty ring, inserting tokens", "ring", i.RingName) - ringDesc.AddIngester(i.ID, i.Addr, i.getTokens(), i.GetState()) + ringDesc.AddIngester(i.ID, i.Addr, i.Zone, i.getTokens(), i.GetState()) } else { ingesterDesc.Timestamp = time.Now().Unix() ingesterDesc.State = i.GetState() diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go index 3170deb91283d..f13965b4e7e8a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go @@ -37,7 +37,7 @@ func NewDesc() *Desc { // AddIngester adds the given ingester to the ring. Ingester will only use supplied tokens, // any other tokens are removed. -func (d *Desc) AddIngester(id, addr string, tokens []uint32, state IngesterState) { +func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state IngesterState) { if d.Ingesters == nil { d.Ingesters = map[string]IngesterDesc{} } @@ -47,6 +47,7 @@ func (d *Desc) AddIngester(id, addr string, tokens []uint32, state IngesterState Timestamp: time.Now().Unix(), State: state, Tokens: tokens, + Zone: zone, } d.Ingesters[id] = ingester @@ -377,6 +378,7 @@ func (d *Desc) RemoveTombstones(limit time.Time) { type TokenDesc struct { Token uint32 Ingester string + Zone string } // Returns sorted list of tokens with ingester names. 
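The AddIngester signature change above ripples to every caller (see the lifecycler call sites earlier in this patch). A minimal sketch of the new call shape; the ID, address, zone, and tokens here are illustrative:

    package main

    import "github.com/cortexproject/cortex/pkg/ring"

    func main() {
    	desc := ring.NewDesc()
    	// The zone is now passed explicitly. An empty zone is ignored by the
    	// zone-aware replica selection in ring.Get (see the ring.go hunk below).
    	desc.AddIngester("ingester-1", "10.0.0.1:9095", "us-east-1a", []uint32{1, 2, 3}, ring.ACTIVE)
    }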
@@ -388,7 +390,7 @@ func (d *Desc) getTokens() []TokenDesc { tokens := make([]TokenDesc, 0, numTokens) for key, ing := range d.Ingesters { for _, token := range ing.Tokens { - tokens = append(tokens, TokenDesc{Token: token, Ingester: key}) + tokens = append(tokens, TokenDesc{Token: token, Ingester: key, Zone: ing.GetZone()}) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go index 9005f4ad85a41..ec9243688e895 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go @@ -34,6 +34,9 @@ const ( // CompactorRingKey is the key under which we store the compactors ring in the KVStore. CompactorRingKey = "compactor" + + // StoreGatewayRingKey is the key under which we store the store gateways ring in the KVStore. + StoreGatewayRingKey = "store-gateway" ) // ReadRing represents the read interface to the ring. @@ -65,9 +68,9 @@ var ErrEmptyRing = errors.New("empty ring") // Config for a Ring type Config struct { - KVStore kv.Config `yaml:"kvstore,omitempty"` - HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout,omitempty"` - ReplicationFactor int `yaml:"replication_factor,omitempty"` + KVStore kv.Config `yaml:"kvstore"` + HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` + ReplicationFactor int `yaml:"replication_factor"` } // RegisterFlags adds the flags required to config this to the given FlagSet with a specified prefix @@ -182,6 +185,7 @@ func (r *Ring) Get(key uint32, op Operation, buf []IngesterDesc) (ReplicationSet n = r.cfg.ReplicationFactor ingesters = buf[:0] distinctHosts = map[string]struct{}{} + distinctZones = map[string]struct{}{} start = r.search(key) iterations = 0 ) @@ -190,11 +194,17 @@ func (r *Ring) Get(key uint32, op Operation, buf []IngesterDesc) (ReplicationSet // Wrap i around in the ring. i %= len(r.ringTokens) - // We want n *distinct* ingesters. + // We want n *distinct* ingesters && distinct zones. token := r.ringTokens[i] if _, ok := distinctHosts[token.Ingester]; ok { continue } + if token.Zone != "" { // Ignore if the ingesters don't have a zone set. 
+ if _, ok := distinctZones[token.Zone]; ok { + continue + } + distinctZones[token.Zone] = struct{}{} + } distinctHosts[token.Ingester] = struct{}{} ingester := r.ringDesc.Ingesters[token.Ingester] diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go index 62e3ad152af9f..077b41b379bec 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go @@ -107,6 +107,7 @@ type IngesterDesc struct { Timestamp int64 `protobuf:"varint,2,opt,name=timestamp,proto3" json:"timestamp,omitempty"` State IngesterState `protobuf:"varint,3,opt,name=state,proto3,enum=ring.IngesterState" json:"state,omitempty"` Tokens []uint32 `protobuf:"varint,6,rep,packed,name=tokens,proto3" json:"tokens,omitempty"` + Zone string `protobuf:"bytes,7,opt,name=zone,proto3" json:"zone,omitempty"` } func (m *IngesterDesc) Reset() { *m = IngesterDesc{} } @@ -169,6 +170,13 @@ func (m *IngesterDesc) GetTokens() []uint32 { return nil } +func (m *IngesterDesc) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + func init() { proto.RegisterEnum("ring.IngesterState", IngesterState_name, IngesterState_value) proto.RegisterType((*Desc)(nil), "ring.Desc") @@ -179,32 +187,32 @@ func init() { func init() { proto.RegisterFile("ring.proto", fileDescriptor_26381ed67e202a6e) } var fileDescriptor_26381ed67e202a6e = []byte{ - // 387 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0x3f, 0x6f, 0xd3, 0x40, - 0x18, 0xc6, 0xef, 0xb5, 0xcf, 0xc6, 0x79, 0x43, 0x2b, 0xeb, 0x90, 0x90, 0xa9, 0xd0, 0x61, 0x75, - 0x32, 0x48, 0xb8, 0x52, 0x60, 0x40, 0x48, 0x0c, 0x2d, 0x35, 0xc8, 0x56, 0x14, 0x2a, 0x53, 0x75, - 0x77, 0xda, 0xc3, 0x58, 0x25, 0x76, 0x65, 0x5f, 0x90, 0xba, 0xf1, 0x0d, 0xe0, 0x0b, 0xb0, 0xf3, - 0x51, 0x3a, 0x66, 0xcc, 0x84, 0x88, 0xb3, 0x30, 0xe6, 0x23, 0xa0, 0x3b, 0x27, 0x0a, 0xd9, 0x9e, - 0xdf, 0x3d, 0x7f, 0xde, 0xe1, 0x10, 0xeb, 0xa2, 0xcc, 0xc3, 0x9b, 0xba, 0x92, 0x15, 0xa3, 0x4a, - 0x1f, 0x3c, 0xcf, 0x0b, 0xf9, 0x79, 0x3a, 0x0e, 0x2f, 0xab, 0xc9, 0x51, 0x5e, 0xe5, 0xd5, 0x91, - 0x36, 0xc7, 0xd3, 0x4f, 0x9a, 0x34, 0x68, 0xd5, 0x95, 0x0e, 0x7f, 0x02, 0xd2, 0x53, 0xd1, 0x5c, - 0xb2, 0x37, 0xd8, 0x2b, 0xca, 0x5c, 0x34, 0x52, 0xd4, 0x8d, 0x07, 0xbe, 0x19, 0xf4, 0x07, 0x8f, - 0x42, 0xbd, 0xae, 0xec, 0x30, 0xde, 0x78, 0x51, 0x29, 0xeb, 0xdb, 0x13, 0x7a, 0xf7, 0xfb, 0x09, - 0x49, 0xb7, 0x8d, 0x83, 0x33, 0xdc, 0xdf, 0x8d, 0x30, 0x17, 0xcd, 0x6b, 0x71, 0xeb, 0x81, 0x0f, - 0x41, 0x2f, 0x55, 0x92, 0x05, 0x68, 0x7d, 0xcd, 0xbe, 0x4c, 0x85, 0x67, 0xf8, 0x10, 0xf4, 0x07, - 0xac, 0x9b, 0xdf, 0xd4, 0xd4, 0x99, 0xb4, 0x0b, 0xbc, 0x36, 0x5e, 0x41, 0x42, 0x1d, 0xc3, 0x35, - 0x0f, 0xbf, 0x03, 0xde, 0xff, 0x3f, 0xc1, 0x18, 0xd2, 0xec, 0xea, 0xaa, 0x5e, 0xef, 0x6a, 0xcd, - 0x1e, 0x63, 0x4f, 0x16, 0x13, 0xd1, 0xc8, 0x6c, 0x72, 0xa3, 0xc7, 0xcd, 0x74, 0xfb, 0xc0, 0x9e, - 0xa2, 0xd5, 0xc8, 0x4c, 0x0a, 0xcf, 0xf4, 0x21, 0xd8, 0x1f, 0x3c, 0xd8, 0x3d, 0xfb, 0x51, 0x59, - 0x69, 0x97, 0x60, 0x0f, 0xd1, 0x96, 0xd5, 0xb5, 0x28, 0x1b, 0xcf, 0xf6, 0xcd, 0x60, 0x2f, 0x5d, - 0x53, 0x42, 0x1d, 0xea, 0x5a, 0x09, 0x75, 0x2c, 0xd7, 0x7e, 0x36, 0xc4, 0xbd, 0x9d, 0x2e, 0x43, - 0xb4, 0x8f, 0xdf, 0x9e, 0xc7, 0x17, 0x91, 0x4b, 0x58, 0x1f, 0xef, 0x0d, 0xa3, 0xe3, 0x8b, 0x78, - 0xf4, 0xde, 0x05, 0x05, 0x67, 0xd1, 0xe8, 0x54, 0x81, 0xa1, 0x20, 0xf9, 0x10, 0x8f, 0x14, 0x98, - 0xcc, 0x41, 0x3a, 0x8c, 0xde, 0x9d, 0xbb, 0xf4, 0xe4, 0xe5, 0x6c, 0xc1, 0xc9, 0x7c, 
0xc1, 0xc9, - 0x6a, 0xc1, 0xe1, 0x5b, 0xcb, 0xe1, 0x57, 0xcb, 0xe1, 0xae, 0xe5, 0x30, 0x6b, 0x39, 0xfc, 0x69, - 0x39, 0xfc, 0x6d, 0x39, 0x59, 0xb5, 0x1c, 0x7e, 0x2c, 0x39, 0x99, 0x2d, 0x39, 0x99, 0x2f, 0x39, - 0x19, 0xdb, 0xfa, 0xf3, 0x5e, 0xfc, 0x0b, 0x00, 0x00, 0xff, 0xff, 0x33, 0x18, 0xb8, 0xad, 0xff, - 0x01, 0x00, 0x00, + // 399 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x91, 0x31, 0x6f, 0xd3, 0x40, + 0x1c, 0xc5, 0xef, 0x6f, 0x9f, 0x5d, 0xe7, 0x1f, 0x5a, 0x59, 0x87, 0x84, 0x4c, 0x85, 0x0e, 0xab, + 0x93, 0x41, 0xc2, 0x95, 0x02, 0x03, 0x42, 0x62, 0x68, 0xa9, 0x41, 0xb6, 0xa2, 0x50, 0x99, 0xaa, + 0xbb, 0xd3, 0x1e, 0xc6, 0x2a, 0xb1, 0x2b, 0xfb, 0x82, 0x54, 0x26, 0x3e, 0x02, 0x5f, 0x80, 0x89, + 0x85, 0x8f, 0xd2, 0x31, 0x63, 0x27, 0x44, 0x9c, 0x85, 0x31, 0x1f, 0x01, 0xdd, 0x39, 0x51, 0xc8, + 0xf6, 0x7e, 0xf7, 0xde, 0xff, 0xbd, 0xe1, 0x10, 0xeb, 0xa2, 0xcc, 0xc3, 0xeb, 0xba, 0x92, 0x15, + 0xa3, 0x4a, 0xef, 0x3f, 0xcb, 0x0b, 0xf9, 0x69, 0x3a, 0x0e, 0x2f, 0xaa, 0xc9, 0x61, 0x5e, 0xe5, + 0xd5, 0xa1, 0x36, 0xc7, 0xd3, 0x8f, 0x9a, 0x34, 0x68, 0xd5, 0x1d, 0x1d, 0xfc, 0x00, 0xa4, 0x27, + 0xa2, 0xb9, 0x60, 0xaf, 0xb1, 0x57, 0x94, 0xb9, 0x68, 0xa4, 0xa8, 0x1b, 0x0f, 0x7c, 0x33, 0xe8, + 0x0f, 0x1e, 0x86, 0xba, 0x5d, 0xd9, 0x61, 0xbc, 0xf6, 0xa2, 0x52, 0xd6, 0x37, 0xc7, 0xf4, 0xf6, + 0xf7, 0x63, 0x92, 0x6e, 0x2e, 0xf6, 0x4f, 0x71, 0x6f, 0x3b, 0xc2, 0x5c, 0x34, 0xaf, 0xc4, 0x8d, + 0x07, 0x3e, 0x04, 0xbd, 0x54, 0x49, 0x16, 0xa0, 0xf5, 0x25, 0xfb, 0x3c, 0x15, 0x9e, 0xe1, 0x43, + 0xd0, 0x1f, 0xb0, 0xae, 0x7e, 0x7d, 0xa6, 0x66, 0xd2, 0x2e, 0xf0, 0xca, 0x78, 0x09, 0x09, 0x75, + 0x0c, 0xd7, 0x3c, 0xf8, 0x09, 0x78, 0xef, 0xff, 0x04, 0x63, 0x48, 0xb3, 0xcb, 0xcb, 0x7a, 0xd5, + 0xab, 0x35, 0x7b, 0x84, 0x3d, 0x59, 0x4c, 0x44, 0x23, 0xb3, 0xc9, 0xb5, 0x2e, 0x37, 0xd3, 0xcd, + 0x03, 0x7b, 0x82, 0x56, 0x23, 0x33, 0x29, 0x3c, 0xd3, 0x87, 0x60, 0x6f, 0x70, 0x7f, 0x7b, 0xf6, + 0x83, 0xb2, 0xd2, 0x2e, 0xc1, 0x1e, 0xa0, 0x2d, 0xab, 0x2b, 0x51, 0x36, 0x9e, 0xed, 0x9b, 0xc1, + 0x6e, 0xba, 0x22, 0x35, 0xfa, 0xb5, 0x2a, 0x85, 0xb7, 0xd3, 0x8d, 0x2a, 0x9d, 0x50, 0x87, 0xba, + 0x56, 0x42, 0x1d, 0xcb, 0xb5, 0x9f, 0x0e, 0x71, 0x77, 0xab, 0x8f, 0x21, 0xda, 0x47, 0x6f, 0xce, + 0xe2, 0xf3, 0xc8, 0x25, 0xac, 0x8f, 0x3b, 0xc3, 0xe8, 0xe8, 0x3c, 0x1e, 0xbd, 0x73, 0x41, 0xc1, + 0x69, 0x34, 0x3a, 0x51, 0x60, 0x28, 0x48, 0xde, 0xc7, 0x23, 0x05, 0x26, 0x73, 0x90, 0x0e, 0xa3, + 0xb7, 0x67, 0x2e, 0x3d, 0x7e, 0x31, 0x9b, 0x73, 0x72, 0x37, 0xe7, 0x64, 0x39, 0xe7, 0xf0, 0xad, + 0xe5, 0xf0, 0xab, 0xe5, 0x70, 0xdb, 0x72, 0x98, 0xb5, 0x1c, 0xfe, 0xb4, 0x1c, 0xfe, 0xb6, 0x9c, + 0x2c, 0x5b, 0x0e, 0xdf, 0x17, 0x9c, 0xcc, 0x16, 0x9c, 0xdc, 0x2d, 0x38, 0x19, 0xdb, 0xfa, 0x43, + 0x9f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xbf, 0xe3, 0xcc, 0xec, 0x13, 0x02, 0x00, 0x00, } func (x IngesterState) String() string { @@ -281,6 +289,9 @@ func (this *IngesterDesc) Equal(that interface{}) bool { return false } } + if this.Zone != that1.Zone { + return false + } return true } func (this *Desc) GoString() string { @@ -309,12 +320,13 @@ func (this *IngesterDesc) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 9) s = append(s, "&ring.IngesterDesc{") s = append(s, "Addr: "+fmt.Sprintf("%#v", this.Addr)+",\n") s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") s = append(s, "Tokens: "+fmt.Sprintf("%#v", this.Tokens)+",\n") + s = append(s, "Zone: "+fmt.Sprintf("%#v", 
this.Zone)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -393,6 +405,13 @@ func (m *IngesterDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Zone) > 0 { + i -= len(m.Zone) + copy(dAtA[i:], m.Zone) + i = encodeVarintRing(dAtA, i, uint64(len(m.Zone))) + i-- + dAtA[i] = 0x3a + } if len(m.Tokens) > 0 { dAtA3 := make([]byte, len(m.Tokens)*10) var j2 int @@ -483,6 +502,10 @@ func (m *IngesterDesc) Size() (n int) { } n += 1 + sovRing(uint64(l)) + l } + l = len(m.Zone) + if l > 0 { + n += 1 + l + sovRing(uint64(l)) + } return n } @@ -521,6 +544,7 @@ func (this *IngesterDesc) String() string { `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, `State:` + fmt.Sprintf("%v", this.State) + `,`, `Tokens:` + fmt.Sprintf("%v", this.Tokens) + `,`, + `Zone:` + fmt.Sprintf("%v", this.Zone) + `,`, `}`, }, "") return s @@ -890,6 +914,38 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error { } else { return fmt.Errorf("proto: wrong wireType = %d for field Tokens", wireType) } + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Zone", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowRing + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthRing + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthRing + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Zone = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipRing(dAtA[iNdEx:]) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto index 9670b01c090d8..8290ee2cca3b4 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto @@ -19,6 +19,7 @@ message IngesterDesc { int64 timestamp = 2; // unix timestamp IngesterState state = 3; repeated uint32 tokens = 6; + string zone = 7; } enum IngesterState { diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/backoff.go b/vendor/github.com/cortexproject/cortex/pkg/util/backoff.go index 3656f19cd0b2d..e3d290dcd393c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/backoff.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/backoff.go @@ -10,9 +10,9 @@ import ( // BackoffConfig configures a Backoff type BackoffConfig struct { - MinBackoff time.Duration // start backoff at this level - MaxBackoff time.Duration // increase exponentially to this level - MaxRetries int // give up after this many; zero means infinite retries + MinBackoff time.Duration `yaml:"min_period"` // start backoff at this level + MaxBackoff time.Duration `yaml:"max_period"` // increase exponentially to this level + MaxRetries int `yaml:"max_retries"` // give up after this many; zero means infinite retries } // RegisterFlags for BackoffConfig. 
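The yaml tags added to BackoffConfig only change how the struct can be populated from a config file; runtime behaviour is unchanged. For orientation, a sketch of the retry loop these knobs drive, using the Backoff helper that lives alongside this config (tryOperation is a stand-in):

    package main

    import (
    	"context"
    	"errors"
    	"time"

    	"github.com/cortexproject/cortex/pkg/util"
    )

    // tryOperation stands in for any fallible call worth retrying.
    func tryOperation() error { return errors.New("transient failure") }

    func main() {
    	cfg := util.BackoffConfig{
    		MinBackoff: 100 * time.Millisecond, // start here
    		MaxBackoff: 10 * time.Second,       // grow exponentially up to here
    		MaxRetries: 5,                      // zero means retry until ctx is cancelled
    	}
    	b := util.NewBackoff(context.Background(), cfg)
    	for b.Ongoing() {
    		if tryOperation() == nil {
    			return
    		}
    		b.Wait() // sleeps for the next backoff period, honouring ctx cancellation
    	}
    }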
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/experimental.go b/vendor/github.com/cortexproject/cortex/pkg/util/experimental.go new file mode 100644 index 0000000000000..6cc163e9deb3f --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/experimental.go @@ -0,0 +1,21 @@ +package util + +import ( + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" +) + +var experimentalFeaturesInUse = promauto.NewCounter( + prometheus.CounterOpts{ + Namespace: "cortex", + Name: "experimental_features_in_use_total", + Help: "The number of experimental features in use.", + }, +) + +// WarnExperimentalUse logs a warning and increments the experimental features metric. +func WarnExperimentalUse(feature string) { + level.Warn(Logger).Log("msg", "experimental feature in use", "feature", feature) + experimentalFeaturesInUse.Inc() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go index 874ac440004f9..414dce2954f0e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go @@ -4,10 +4,20 @@ import ( "flag" "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" "github.com/cortexproject/cortex/pkg/util" ) +// DeprecatedFlagsUsed is the metric that counts deprecated flags set. +var DeprecatedFlagsUsed = promauto.NewCounter( + prometheus.CounterOpts{ + Namespace: "cortex", + Name: "deprecated_flags_inuse_total", + Help: "The number of deprecated flags currently set.", + }) + type deprecatedFlag struct { name string } @@ -18,6 +28,7 @@ func (deprecatedFlag) String() string { func (d deprecatedFlag) Set(string) error { level.Warn(util.Logger).Log("msg", "flag disabled", "flag", d.name) + DeprecatedFlagsUsed.Inc() return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go index b4aeabb48b22a..537d3cfd94754 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go @@ -254,6 +254,35 @@ func (d MetricFamiliesPerUser) SendSumOfHistograms(out chan<- prometheus.Metric, out <- hd.Metric(desc) } +func (d MetricFamiliesPerUser) SendSumOfHistogramsWithLabels(out chan<- prometheus.Metric, desc *prometheus.Desc, histogramName string, labelNames ...string) { + type histogramResult struct { + data HistogramData + labelValues []string + } + + result := map[string]histogramResult{} + + for _, userMetrics := range d { + metricsPerLabelValue := getMetricsWithLabelNames(userMetrics[histogramName], labelNames) + + for key, mwl := range metricsPerLabelValue { + for _, m := range mwl.metrics { + r := result[key] + if r.labelValues == nil { + r.labelValues = mwl.labelValues + } + + r.data.AddHistogram(m.GetHistogram()) + result[key] = r + } + } + } + + for _, hg := range result { + out <- hg.data.Metric(desc, hg.labelValues...) + } +} + // struct for holding metrics with same label values type metricsWithLabels struct { labelValues []string @@ -405,8 +434,8 @@ func (d *HistogramData) AddHistogramData(histo HistogramData) { } // Return prometheus metric from this histogram data. 
-func (d *HistogramData) Metric(desc *prometheus.Desc) prometheus.Metric { - return prometheus.MustNewConstHistogram(desc, d.sampleCount, d.sampleSum, d.buckets) +func (d *HistogramData) Metric(desc *prometheus.Desc, labelValues ...string) prometheus.Metric { + return prometheus.MustNewConstHistogram(desc, d.sampleCount, d.sampleSum, d.buckets, labelValues...) } // Creates new histogram data collector. diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/strings.go b/vendor/github.com/cortexproject/cortex/pkg/util/strings.go new file mode 100644 index 0000000000000..39868e1d1cbf6 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/strings.go @@ -0,0 +1,12 @@ +package util + +// StringsContain returns true if the search value is within the list of input values. +func StringsContain(values []string, search string) bool { + for _, v := range values { + if search == v { + return true + } + } + + return false +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go index 889893dd1d0ca..12476479e0a2e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go @@ -31,6 +31,8 @@ const ( // ErrQueryTooLong is used in chunk store and query frontend. ErrQueryTooLong = "invalid query, length > limit (%s > %s)" + missingMetricName = "missing_metric_name" + invalidMetricName = "metric_name_invalid" greaterThanMaxSampleAge = "greater_than_max_sample_age" maxLabelNamesPerSeries = "max_label_names_per_series" tooFarInFuture = "too_far_in_future" @@ -93,10 +95,12 @@ func ValidateLabels(cfg LabelValidationConfig, userID string, ls []client.LabelA metricName, err := extract.MetricNameFromLabelAdapters(ls) if cfg.EnforceMetricName(userID) { if err != nil { + DiscardedSamples.WithLabelValues(missingMetricName, userID).Inc() return httpgrpc.Errorf(http.StatusBadRequest, errMissingMetricName) } if !model.IsValidMetricName(model.LabelValue(metricName)) { + DiscardedSamples.WithLabelValues(invalidMetricName, userID).Inc() return httpgrpc.Errorf(http.StatusBadRequest, errInvalidMetricName, metricName) } } diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml index d4b92663bacfb..9159de03e03db 100644 --- a/vendor/github.com/pkg/errors/.travis.yml +++ b/vendor/github.com/pkg/errors/.travis.yml @@ -1,15 +1,10 @@ language: go go_import_path: github.com/pkg/errors go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - 1.11.x + - 1.12.x + - 1.13.x - tip script: - - go test -v ./... 
+ - make check diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile new file mode 100644 index 0000000000000..ce9d7cded649a --- /dev/null +++ b/vendor/github.com/pkg/errors/Makefile @@ -0,0 +1,44 @@ +PKGS := github.com/pkg/errors +SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) +GO := go + +check: test vet gofmt misspell unconvert staticcheck ineffassign unparam + +test: + $(GO) test $(PKGS) + +vet: | test + $(GO) vet $(PKGS) + +staticcheck: + $(GO) get honnef.co/go/tools/cmd/staticcheck + staticcheck -checks all $(PKGS) + +misspell: + $(GO) get github.com/client9/misspell/cmd/misspell + misspell \ + -locale GB \ + -error \ + *.md *.go + +unconvert: + $(GO) get github.com/mdempsky/unconvert + unconvert -v $(PKGS) + +ineffassign: + $(GO) get github.com/gordonklaus/ineffassign + find $(SRCDIRS) -name '*.go' | xargs ineffassign + +pedantic: check errcheck + +unparam: + $(GO) get mvdan.cc/unparam + unparam ./... + +errcheck: + $(GO) get github.com/kisielk/errcheck + errcheck $(PKGS) + +gofmt: + @echo Checking code is gofmted + @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md index 6483ba2afb512..54dfdcb12ea1b 100644 --- a/vendor/github.com/pkg/errors/README.md +++ b/vendor/github.com/pkg/errors/README.md @@ -41,11 +41,18 @@ default: [Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). +## Roadmap + +With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: + +- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) +- 1.0. Final release. + ## Contributing -We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. +Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. -Before proposing a change, please discuss your change by raising an issue. +Before sending a PR, please discuss your change by raising an issue. ## License diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go index 7421f326ffe84..161aea2582969 100644 --- a/vendor/github.com/pkg/errors/errors.go +++ b/vendor/github.com/pkg/errors/errors.go @@ -82,7 +82,7 @@ // // if err, ok := err.(stackTracer); ok { // for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d", f) +// fmt.Printf("%+s:%d\n", f, f) // } // } // @@ -159,6 +159,9 @@ type withStack struct { func (w *withStack) Cause() error { return w.error } +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withStack) Unwrap() error { return w.error } + func (w *withStack) Format(s fmt.State, verb rune) { switch verb { case 'v': @@ -241,6 +244,9 @@ type withMessage struct { func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } func (w *withMessage) Cause() error { return w.cause } +// Unwrap provides compatibility for Go 1.13 error chains. 
+func (w *withMessage) Unwrap() error { return w.cause } + func (w *withMessage) Format(s fmt.State, verb rune) { switch verb { case 'v': diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go new file mode 100644 index 0000000000000..be0d10d0c793d --- /dev/null +++ b/vendor/github.com/pkg/errors/go113.go @@ -0,0 +1,38 @@ +// +build go1.13 + +package errors + +import ( + stderrors "errors" +) + +// Is reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { return stderrors.Is(err, target) } + +// As finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +func As(err error, target interface{}) bool { return stderrors.As(err, target) } + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return stderrors.Unwrap(err) +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go index 2874a048cf3e6..779a8348fb9c2 100644 --- a/vendor/github.com/pkg/errors/stack.go +++ b/vendor/github.com/pkg/errors/stack.go @@ -5,10 +5,13 @@ import ( "io" "path" "runtime" + "strconv" "strings" ) // Frame represents a program counter inside a stack frame. +// For historical reasons if Frame is interpreted as a uintptr +// its value represents the program counter + 1. type Frame uintptr // pc returns the program counter for this frame; @@ -37,6 +40,15 @@ func (f Frame) line() int { return line } +// name returns the name of this function, if known. +func (f Frame) name() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + return fn.Name() +} + // Format formats the frame according to the fmt.Formatter interface. // // %s source file @@ -54,22 +66,16 @@ func (f Frame) Format(s fmt.State, verb rune) { case 's': switch { case s.Flag('+'): - pc := f.pc() - fn := runtime.FuncForPC(pc) - if fn == nil { - io.WriteString(s, "unknown") - } else { - file, _ := fn.FileLine(pc) - fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) - } + io.WriteString(s, f.name()) + io.WriteString(s, "\n\t") + io.WriteString(s, f.file()) default: io.WriteString(s, path.Base(f.file())) } case 'd': - fmt.Fprintf(s, "%d", f.line()) + io.WriteString(s, strconv.Itoa(f.line())) case 'n': - name := runtime.FuncForPC(f.pc()).Name() - io.WriteString(s, funcname(name)) + io.WriteString(s, funcname(f.name())) case 'v': f.Format(s, 's') io.WriteString(s, ":") @@ -77,6 +83,16 @@ func (f Frame) Format(s fmt.State, verb rune) { } } +// MarshalText formats a stacktrace Frame as a text string. 
The output is the +// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. +func (f Frame) MarshalText() ([]byte, error) { + name := f.name() + if name == "unknown" { + return []byte(name), nil + } + return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil +} + // StackTrace is stack of Frames from innermost (newest) to outermost (oldest). type StackTrace []Frame @@ -94,16 +110,30 @@ func (st StackTrace) Format(s fmt.State, verb rune) { switch { case s.Flag('+'): for _, f := range st { - fmt.Fprintf(s, "\n%+v", f) + io.WriteString(s, "\n") + f.Format(s, verb) } case s.Flag('#'): fmt.Fprintf(s, "%#v", []Frame(st)) default: - fmt.Fprintf(s, "%v", []Frame(st)) + st.formatSlice(s, verb) } case 's': - fmt.Fprintf(s, "%s", []Frame(st)) + st.formatSlice(s, verb) + } +} + +// formatSlice will format this StackTrace into the given buffer as a slice of +// Frame, only valid when called with '%s' or '%v'. +func (st StackTrace) formatSlice(s fmt.State, verb rune) { + io.WriteString(s, "[") + for i, f := range st { + if i > 0 { + io.WriteString(s, " ") + } + f.Format(s, verb) } + io.WriteString(s, "]") } // stack represents a stack of program counters. diff --git a/vendor/github.com/thanos-io/thanos/LICENSE b/vendor/github.com/thanos-io/thanos/LICENSE new file mode 100644 index 0000000000000..8dada3edaf50d --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go new file mode 100644 index 0000000000000..4e3fb492e2eb6 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go @@ -0,0 +1,152 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package miekgdns + +import ( + "bytes" + "net" + + "github.com/miekg/dns" + "github.com/pkg/errors" +) + +// Copied and slightly adjusted from Prometheus DNS SD: +// https://github.com/prometheus/prometheus/blob/be3c082539d85908ce03b6d280f83343e7c930eb/discovery/dns/dns.go#L212 + +// lookupWithSearchPath tries to get an answer for various permutations of +// the given name, appending the system-configured search path as necessary. +// +// There are three possible outcomes: +// +// 1. 
One of the permutations of the given name is recognized as +// "valid" by the DNS, in which case we consider ourselves "done" +// and that answer is returned. Note that, due to the way the DNS +// handles "name has resource records, but none of the specified type", +// the answer received may have an empty set of results. +// +// 2. All of the permutations of the given name are responded to by one of +// the servers in the "nameservers" list with the answer "that name does +// not exist" (NXDOMAIN). In that case, it can be considered +// pseudo-authoritative that there are no records for that name. +// +// 3. One or more of the names was responded to by all servers with some +// sort of error indication. In that case, we can't know if, in fact, +// there are records for the name or not, so whatever state the +// configuration is in, we should keep it that way until we know for +// sure (by, presumably, all the names getting answers in the future). +// +// Outcomes 1 and 2 are indicated by a valid response message (possibly an +// empty one) and no error. Outcome 3 is indicated by an error return. The +// error will be generic-looking, because trying to return all the errors +// returned by the combination of all name permutations and servers is a +// nightmare. +func (r *Resolver) lookupWithSearchPath(name string, qtype dns.Type) (*dns.Msg, error) { + conf, err := dns.ClientConfigFromFile(r.ResolvConf) + if err != nil { + return nil, errors.Wrapf(err, "could not load resolv.conf: %s", err) + } + + var errs []error + for _, lname := range conf.NameList(name) { + response, err := lookupFromAnyServer(lname, qtype, conf) + if err != nil { + // We can't go home yet, because a later name + // may give us a valid, successful answer. However + // we can no longer say "this name definitely doesn't + // exist", because we did not get that answer for + // at least one name. + errs = append(errs, err) + continue + } + + if response.Rcode == dns.RcodeSuccess { + // Outcome 1: GOLD! + return response, nil + } + } + + if len(errs) == 0 { + // Outcome 2: everyone says NXDOMAIN. + return &dns.Msg{}, nil + } + // Outcome 3: boned. + return nil, errors.Errorf("could not resolve %q: all servers responded with errors to at least one search domain. Errs %s", name, fmtErrs(errs)) +} + +// lookupFromAnyServer uses all configured servers to try and resolve a specific +// name. If a viable answer is received from a server, then it is +// immediately returned, otherwise the other servers in the config are +// tried, and if none of them return a viable answer, an error is returned. +// +// A "viable answer" is one which indicates either: +// +// 1. "yes, I know that name, and here are its records of the requested type" +// (RCODE==SUCCESS, ANCOUNT > 0); +// 2. "yes, I know that name, but it has no records of the requested type" +// (RCODE==SUCCESS, ANCOUNT==0); or +// 3. "I know that name doesn't exist" (RCODE==NXDOMAIN). +// +// A non-viable answer is "anything else", which encompasses both various +// system-level problems (like network timeouts) and also +// valid-but-unexpected DNS responses (SERVFAIL, REFUSED, etc). +func lookupFromAnyServer(name string, qtype dns.Type, conf *dns.ClientConfig) (*dns.Msg, error) { + client := &dns.Client{} + + var errs []error + + // TODO(bwplotka): Worth to do fanout and grab fastest as golang native lib? 
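
The comment block above spells out the search-path strategy; as a concrete illustration, the sketch below (an editorial aside, not part of the vendored file) shows how the miekg/dns ClientConfig used here expands a name against the search path. The search domains and host names are made up.

package main

import (
	"fmt"

	"github.com/miekg/dns"
)

func main() {
	// Hypothetical resolv.conf contents: two search domains, ndots 1.
	conf := &dns.ClientConfig{
		Search: []string{"svc.cluster.local", "cluster.local"},
		Ndots:  1,
	}
	// A bare name is expanded with the search domains; a fully
	// qualified name (trailing dot) is tried exactly as given.
	for _, name := range []string{"memcached", "memcached.example.com."} {
		fmt.Printf("%s -> %v\n", name, conf.NameList(name))
	}
}
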
+ for _, server := range conf.Servers { + servAddr := net.JoinHostPort(server, conf.Port) + msg, err := askServerForName(name, qtype, client, servAddr, true) + if err != nil { + errs = append(errs, errors.Wrapf(err, "resolution against server %s for %s", server, name)) + continue + } + + if msg.Rcode == dns.RcodeSuccess || msg.Rcode == dns.RcodeNameError { + return msg, nil + } + } + + return nil, errors.Errorf("could not resolve %s: no servers returned a viable answer. Errs %v", name, fmtErrs(errs)) +} + +func fmtErrs(errs []error) string { + b := bytes.Buffer{} + for _, err := range errs { + b.WriteString(";") + b.WriteString(err.Error()) + } + return b.String() +} + +// askServerForName makes a request to a specific DNS server for a specific +// name (and qtype). Retries with TCP in the event of response truncation, +// but otherwise just sends back whatever the server gave, whether that be a +// valid-looking response, or an error. +func askServerForName(name string, qType dns.Type, client *dns.Client, servAddr string, edns bool) (*dns.Msg, error) { + msg := &dns.Msg{} + + msg.SetQuestion(dns.Fqdn(name), uint16(qType)) + if edns { + msg.SetEdns0(dns.DefaultMsgSize, false) + } + + response, _, err := client.Exchange(msg, servAddr) + if err != nil { + return nil, errors.Wrapf(err, "exchange") + } + + if response.Truncated { + if client.Net == "tcp" { + return nil, errors.Errorf("got truncated message on TCP (64kiB limit exceeded?)") + } + + // TCP fallback. + client.Net = "tcp" + return askServerForName(name, qType, client, servAddr, false) + } + + return response, nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/resolver.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/resolver.go new file mode 100644 index 0000000000000..e62660f12c853 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/resolver.go @@ -0,0 +1,74 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package miekgdns + +import ( + "context" + "net" + + "github.com/miekg/dns" + "github.com/pkg/errors" +) + +// DefaultResolvConfPath is a common, default resolv.conf file present on linux server. +const DefaultResolvConfPath = "/etc/resolv.conf" + +// Resolver is a drop-in Resolver for *part* of std lib Golang net.DefaultResolver methods. +type Resolver struct { + ResolvConf string +} + +func (r *Resolver) LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) { + var target string + if service == "" && proto == "" { + target = name + } else { + target = "_" + service + "._" + proto + "." + name + } + + response, err := r.lookupWithSearchPath(target, dns.Type(dns.TypeSRV)) + if err != nil { + return "", nil, err + } + + for _, record := range response.Answer { + switch addr := record.(type) { + case *dns.SRV: + addrs = append(addrs, &net.SRV{ + Weight: addr.Weight, + Target: addr.Target, + Priority: addr.Priority, + Port: addr.Port, + }) + default: + return "", nil, errors.Errorf("invalid SRV response record %s", record) + } + } + + return "", addrs, nil +} + +func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) { + response, err := r.lookupWithSearchPath(host, dns.Type(dns.TypeAAAA)) + if err != nil || len(response.Answer) == 0 { + // Ugly fallback to A lookup. 
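
As a side note on the LookupSRV implementation above: when service and proto are non-empty, the query name follows the RFC 2782 `_service._proto.name` convention. A tiny standalone sketch with hypothetical inputs:

package main

import "fmt"

// srvTarget mirrors how LookupSRV above assembles its query name.
func srvTarget(service, proto, name string) string {
	if service == "" && proto == "" {
		return name // query the given name directly
	}
	return "_" + service + "._" + proto + "." + name
}

func main() {
	fmt.Println(srvTarget("memcached", "tcp", "example.com")) // _memcached._tcp.example.com
	fmt.Println(srvTarget("", "", "_custom._udp.example.com")) // passed through unchanged
}
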
+ response, err = r.lookupWithSearchPath(host, dns.Type(dns.TypeA)) + if err != nil { + return nil, err + } + } + + var resp []net.IPAddr + for _, record := range response.Answer { + switch addr := record.(type) { + case *dns.A: + resp = append(resp, net.IPAddr{IP: addr.A}) + case *dns.AAAA: + resp = append(resp, net.IPAddr{IP: addr.AAAA}) + default: + return nil, errors.Errorf("invalid A or AAAA response record %s", record) + } + } + return resp, nil +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go new file mode 100644 index 0000000000000..a3e0730730938 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go @@ -0,0 +1,152 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package dns + +import ( + "context" + "net" + "strings" + "sync" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns" +) + +// Provider is a stateful cache for asynchronous DNS resolutions. It provides a way to resolve addresses and obtain them. +type Provider struct { + sync.Mutex + resolver Resolver + // A map from domain name to a slice of resolved targets. + resolved map[string][]string + logger log.Logger + + resolverAddrs *prometheus.GaugeVec + resolverLookupsCount prometheus.Counter + resolverFailuresCount prometheus.Counter +} + +type ResolverType string + +const ( + GolangResolverType ResolverType = "golang" + MiekgdnsResolverType ResolverType = "miekgdns" +) + +func (t ResolverType) ToResolver(logger log.Logger) ipLookupResolver { + var r ipLookupResolver + switch t { + case GolangResolverType: + r = net.DefaultResolver + case MiekgdnsResolverType: + r = &miekgdns.Resolver{ResolvConf: miekgdns.DefaultResolvConfPath} + default: + level.Warn(logger).Log("msg", "no such resolver type, defaulting to golang", "type", t) + r = net.DefaultResolver + } + return r +} + +// NewProvider returns a new empty provider with a given resolver type. +// If empty resolver type is net.DefaultResolver.w +func NewProvider(logger log.Logger, reg prometheus.Registerer, resolverType ResolverType) *Provider { + p := &Provider{ + resolver: NewResolver(resolverType.ToResolver(logger)), + resolved: make(map[string][]string), + logger: logger, + resolverAddrs: prometheus.NewGaugeVec(prometheus.GaugeOpts{ + Name: "dns_provider_results", + Help: "The number of resolved endpoints for each configured address", + }, []string{"addr"}), + resolverLookupsCount: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "dns_lookups_total", + Help: "The number of DNS lookups resolutions attempts", + }), + resolverFailuresCount: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "dns_failures_total", + Help: "The number of DNS lookup failures", + }), + } + + if reg != nil { + reg.MustRegister(p.resolverAddrs) + reg.MustRegister(p.resolverLookupsCount) + reg.MustRegister(p.resolverFailuresCount) + } + + return p +} + +// Clone returns a new provider from an existing one. +func (p *Provider) Clone() *Provider { + return &Provider{ + resolver: p.resolver, + resolved: make(map[string][]string), + logger: p.logger, + resolverAddrs: p.resolverAddrs, + resolverLookupsCount: p.resolverLookupsCount, + resolverFailuresCount: p.resolverFailuresCount, + } +} + +// Resolve stores a list of provided addresses or their DNS records if requested. 
+// Addresses prefixed with `dns+` or `dnssrv+` will be resolved through respective DNS lookup (A/AAAA or SRV). +// defaultPort is used for non-SRV records when a port is not supplied. +func (p *Provider) Resolve(ctx context.Context, addrs []string) { + p.Lock() + defer p.Unlock() + + for _, addr := range addrs { + var resolved []string + qtypeAndName := strings.SplitN(addr, "+", 2) + if len(qtypeAndName) != 2 { + // No lookup specified. Add to results and continue to the next address. + p.resolved[addr] = []string{addr} + continue + } + qtype, name := qtypeAndName[0], qtypeAndName[1] + + resolved, err := p.resolver.Resolve(ctx, name, QType(qtype)) + p.resolverLookupsCount.Inc() + if err != nil { + // The DNS resolution failed. Continue without modifying the old records. + p.resolverFailuresCount.Inc() + level.Error(p.logger).Log("msg", "dns resolution failed", "addr", addr, "err", err) + continue + } + p.resolved[addr] = resolved + } + + // Remove stored addresses that are no longer requested. + for existingAddr := range p.resolved { + if !contains(addrs, existingAddr) { + delete(p.resolved, existingAddr) + p.resolverAddrs.DeleteLabelValues(existingAddr) + } else { + p.resolverAddrs.WithLabelValues(existingAddr).Set(float64(len(p.resolved[existingAddr]))) + } + } +} + +// Addresses returns the latest addresses present in the Provider. +func (p *Provider) Addresses() []string { + p.Lock() + defer p.Unlock() + + var result []string + for _, addrs := range p.resolved { + result = append(result, addrs...) + } + return result +} + +func contains(slice []string, str string) bool { + for _, s := range slice { + if str == s { + return true + } + } + return false +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go new file mode 100644 index 0000000000000..ef730547689bd --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go @@ -0,0 +1,117 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package dns + +import ( + "context" + "net" + "strconv" + "strings" + + "github.com/pkg/errors" +) + +type QType string + +const ( + // A qtype performs A/AAAA lookup. + A = QType("dns") + // SRV qtype performs SRV lookup with A/AAAA lookup for each SRV result. + SRV = QType("dnssrv") + // SRVNoA qtype performs SRV lookup without any A/AAAA lookup for each SRV result. + SRVNoA = QType("dnssrvnoa") +) + +type Resolver interface { + // Resolve performs a DNS lookup and returns a list of records. + // name is the domain name to be resolved. + // qtype is the query type. Accepted values are `dns` for A/AAAA lookup and `dnssrv` for SRV lookup. + // If scheme is passed through name, it is preserved on IP results. + Resolve(ctx context.Context, name string, qtype QType) ([]string, error) +} + +type ipLookupResolver interface { + LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) + LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) +} + +type dnsSD struct { + resolver ipLookupResolver +} + +// NewResolver creates a resolver with given underlying resolver. 
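
As a usage sketch for the Provider above (an illustration only, not part of the vendored tree), this is how a caller might drive the documented `dns+`/`dnssrv+` prefixes; the endpoints and ports are placeholders:

package main

import (
	"context"
	"fmt"

	"github.com/go-kit/kit/log"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/thanos-io/thanos/pkg/discovery/dns"
)

func main() {
	p := dns.NewProvider(log.NewNopLogger(), prometheus.NewRegistry(), dns.GolangResolverType)
	p.Resolve(context.Background(), []string{
		"dns+memcached.example.com:11211",    // A/AAAA lookup; explicit port required
		"dnssrv+_memcached._tcp.example.com", // SRV lookup; port taken from each record
		"10.0.0.1:11211",                     // no prefix: stored verbatim
	})
	// Addresses reflects whatever the last Resolve call produced.
	fmt.Println(p.Addresses())
}
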
+func NewResolver(resolver ipLookupResolver) Resolver { + return &dnsSD{resolver: resolver} +} + +func (s *dnsSD) Resolve(ctx context.Context, name string, qtype QType) ([]string, error) { + var ( + res []string + scheme string + ) + + schemeSplit := strings.Split(name, "//") + if len(schemeSplit) > 1 { + scheme = schemeSplit[0] + name = schemeSplit[1] + } + + // Split the host and port if present. + host, port, err := net.SplitHostPort(name) + if err != nil { + // The host could be missing a port. + host, port = name, "" + } + + switch qtype { + case A: + if port == "" { + return nil, errors.Errorf("missing port in address given for dns lookup: %v", name) + } + ips, err := s.resolver.LookupIPAddr(ctx, host) + if err != nil { + return nil, errors.Wrapf(err, "lookup IP addresses %q", host) + } + for _, ip := range ips { + res = append(res, appendScheme(scheme, net.JoinHostPort(ip.String(), port))) + } + case SRV, SRVNoA: + _, recs, err := s.resolver.LookupSRV(ctx, "", "", host) + if err != nil { + return nil, errors.Wrapf(err, "lookup SRV records %q", host) + } + + for _, rec := range recs { + // Only use port from SRV record if no explicit port was specified. + resPort := port + if resPort == "" { + resPort = strconv.Itoa(int(rec.Port)) + } + + if qtype == SRVNoA { + res = append(res, appendScheme(scheme, net.JoinHostPort(rec.Target, resPort))) + continue + } + // Do A lookup for the domain in SRV answer. + resIPs, err := s.resolver.LookupIPAddr(ctx, rec.Target) + if err != nil { + return nil, errors.Wrapf(err, "look IP addresses %q", rec.Target) + } + for _, resIP := range resIPs { + res = append(res, appendScheme(scheme, net.JoinHostPort(resIP.String(), resPort))) + } + } + default: + return nil, errors.Errorf("invalid lookup scheme %q", qtype) + } + + return res, nil +} + +func appendScheme(scheme, host string) string { + if scheme == "" { + return host + } + return scheme + "//" + host +} diff --git a/vendor/golang.org/x/mod/LICENSE b/vendor/golang.org/x/mod/LICENSE new file mode 100644 index 0000000000000..6a66aea5eafe0 --- /dev/null +++ b/vendor/golang.org/x/mod/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/mod/PATENTS b/vendor/golang.org/x/mod/PATENTS new file mode 100644 index 0000000000000..733099041f84f --- /dev/null +++ b/vendor/golang.org/x/mod/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/tools/internal/module/module.go b/vendor/golang.org/x/mod/module/module.go similarity index 57% rename from vendor/golang.org/x/tools/internal/module/module.go rename to vendor/golang.org/x/mod/module/module.go index 9a4edb9dec159..6cd37280a85f5 100644 --- a/vendor/golang.org/x/tools/internal/module/module.go +++ b/vendor/golang.org/x/mod/module/module.go @@ -2,8 +2,86 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package module defines the module.Version type -// along with support code. +// Package module defines the module.Version type along with support code. +// +// The module.Version type is a simple Path, Version pair: +// +// type Version struct { +// Path string +// Version string +// } +// +// There are no restrictions imposed directly by use of this structure, +// but additional checking functions, most notably Check, verify that +// a particular path, version pair is valid. +// +// Escaped Paths +// +// Module paths appear as substrings of file system paths +// (in the download cache) and of web server URLs in the proxy protocol. +// In general we cannot rely on file systems to be case-sensitive, +// nor can we rely on web servers, since they read from file systems. +// That is, we cannot rely on the file system to keep rsc.io/QUOTE +// and rsc.io/quote separate. Windows and macOS don't. +// Instead, we must never require two different casings of a file path. 
+// Because we want the download cache to match the proxy protocol, +// and because we want the proxy protocol to be possible to serve +// from a tree of static files (which might be stored on a case-insensitive +// file system), the proxy protocol must never require two different casings +// of a URL path either. +// +// One possibility would be to make the escaped form be the lowercase +// hexadecimal encoding of the actual path bytes. This would avoid ever +// needing different casings of a file path, but it would be fairly illegible +// to most programmers when those paths appeared in the file system +// (including in file paths in compiler errors and stack traces) +// in web server logs, and so on. Instead, we want a safe escaped form that +// leaves most paths unaltered. +// +// The safe escaped form is to replace every uppercase letter +// with an exclamation mark followed by the letter's lowercase equivalent. +// +// For example, +// +// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. +// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy +// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. +// +// Import paths that avoid upper-case letters are left unchanged. +// Note that because import paths are ASCII-only and avoid various +// problematic punctuation (like : < and >), the escaped form is also ASCII-only +// and avoids the same problematic punctuation. +// +// Import paths have never allowed exclamation marks, so there is no +// need to define how to escape a literal !. +// +// Unicode Restrictions +// +// Today, paths are disallowed from using Unicode. +// +// Although paths are currently disallowed from using Unicode, +// we would like at some point to allow Unicode letters as well, to assume that +// file systems and URLs are Unicode-safe (storing UTF-8), and apply +// the !-for-uppercase convention for escaping them in the file system. +// But there are at least two subtle considerations. +// +// First, note that not all case-fold equivalent distinct runes +// form an upper/lower pair. +// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) +// are three distinct runes that case-fold to each other. +// When we do add Unicode letters, we must not assume that upper/lower +// are the only case-equivalent pairs. +// Perhaps the Kelvin symbol would be disallowed entirely, for example. +// Or perhaps it would escape as "!!k", or perhaps as "(212A)". +// +// Second, it would be nice to allow Unicode marks as well as letters, +// but marks include combining marks, and then we must deal not +// only with case folding but also normalization: both U+00E9 ('é') +// and U+0065 U+0301 ('e' followed by combining acute accent) +// look the same on the page and are treated by some file systems +// as the same path. If we do allow Unicode marks in paths, there +// must be some kind of normalization to allow only one canonical +// encoding of any character used in an import path. package module // IMPORTANT NOTE @@ -24,22 +102,95 @@ import ( "unicode" "unicode/utf8" - "golang.org/x/tools/internal/semver" + "golang.org/x/mod/semver" + errors "golang.org/x/xerrors" ) -// A Version is defined by a module path and version pair. +// A Version (for clients, a module.Version) is defined by a module path and version pair. +// These are stored in their plain (unescaped) form. type Version struct { + // Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2". 
Path string // Version is usually a semantic version in canonical form. - // There are two exceptions to this general rule. + // There are three exceptions to this general rule. // First, the top-level target of a build has no specific version // and uses Version = "". // Second, during MVS calculations the version "none" is used // to represent the decision to take no version of a given module. + // Third, filesystem paths found in "replace" directives are + // represented by a path with an empty version. Version string `json:",omitempty"` } +// String returns a representation of the Version suitable for logging +// (Path@Version, or just Path if Version is empty). +func (m Version) String() string { + if m.Version == "" { + return m.Path + } + return m.Path + "@" + m.Version +} + +// A ModuleError indicates an error specific to a module. +type ModuleError struct { + Path string + Version string + Err error +} + +// VersionError returns a ModuleError derived from a Version and error, +// or err itself if it is already such an error. +func VersionError(v Version, err error) error { + var mErr *ModuleError + if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version { + return err + } + return &ModuleError{ + Path: v.Path, + Version: v.Version, + Err: err, + } +} + +func (e *ModuleError) Error() string { + if v, ok := e.Err.(*InvalidVersionError); ok { + return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err) + } + if e.Version != "" { + return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err) + } + return fmt.Sprintf("module %s: %v", e.Path, e.Err) +} + +func (e *ModuleError) Unwrap() error { return e.Err } + +// An InvalidVersionError indicates an error specific to a version, with the +// module path unknown or specified externally. +// +// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError +// must not wrap a ModuleError. +type InvalidVersionError struct { + Version string + Pseudo bool + Err error +} + +// noun returns either "version" or "pseudo-version", depending on whether +// e.Version is a pseudo-version. +func (e *InvalidVersionError) noun() string { + if e.Pseudo { + return "pseudo-version" + } + return "version" +} + +func (e *InvalidVersionError) Error() string { + return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err) +} + +func (e *InvalidVersionError) Unwrap() error { return e.Err } + // Check checks that a given module path, version pair is valid. // In addition to the path being a valid module path // and the version being a valid semantic version, @@ -51,17 +202,14 @@ func Check(path, version string) error { return err } if !semver.IsValid(version) { - return fmt.Errorf("malformed semantic version %v", version) + return &ModuleError{ + Path: path, + Err: &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")}, + } } _, pathMajor, _ := SplitPathVersion(path) - if !MatchPathMajor(version, pathMajor) { - if pathMajor == "" { - pathMajor = "v0 or v1" - } - if pathMajor[0] == '.' { // .v1 - pathMajor = pathMajor[1:] - } - return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor) + if err := CheckPathMajor(version, pathMajor); err != nil { + return &ModuleError{Path: path, Err: err} } return nil } @@ -79,7 +227,7 @@ func firstPathOK(r rune) bool { // Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. // This matches what "go get" has historically recognized in import paths. 
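
A short, self-contained sketch (module paths are examples only) of how Check surfaces the structured errors introduced above:

package main

import (
	"errors"
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	fmt.Println(module.Check("rsc.io/quote", "v1.5.2")) // <nil>

	// A /v3 path cannot carry a v1 version; the detail arrives wrapped
	// in a *ModuleError around an *InvalidVersionError.
	err := module.Check("rsc.io/quote/v3", "v1.0.0")
	var mErr *module.ModuleError
	if errors.As(err, &mErr) {
		fmt.Println("path:", mErr.Path, "detail:", mErr.Err)
	}
}
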
// TODO(rsc): We would like to allow Unicode letters, but that requires additional -// care in the safe encoding (see note below). +// care in the safe encoding (see "escaped paths" above). func pathOK(r rune) bool { if r < utf8.RuneSelf { return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' || @@ -94,7 +242,7 @@ func pathOK(r rune) bool { // For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters. // If we expand the set of allowed characters here, we have to // work harder at detecting potential case-folding and normalization collisions. -// See note about "safe encoding" below. +// See note about "escaped paths" above. func fileNameOK(r rune) bool { if r < utf8.RuneSelf { // Entire set of ASCII punctuation, from which we remove characters: @@ -120,6 +268,17 @@ func fileNameOK(r rune) bool { } // CheckPath checks that a module path is valid. +// A valid module path is a valid import path, as checked by CheckImportPath, +// with two additional constraints. +// First, the leading path element (up to the first slash, if any), +// by convention a domain name, must contain only lower-case ASCII letters, +// ASCII digits, dots (U+002E), and dashes (U+002D); +// it must contain at least one dot and cannot start with a dash. +// Second, for a final path element of the form /vN, where N looks numeric +// (ASCII digits and dots) must not begin with a leading zero, must not be /v1, +// and must not contain any dots. For paths beginning with "gopkg.in/", +// this second requirement is replaced by a requirement that the path +// follow the gopkg.in server's conventions. func CheckPath(path string) error { if err := checkPath(path, false); err != nil { return fmt.Errorf("malformed module path %q: %v", path, err) @@ -149,6 +308,20 @@ func CheckPath(path string) error { } // CheckImportPath checks that an import path is valid. +// +// A valid import path consists of one or more valid path elements +// separated by slashes (U+002F). (It must not begin with nor end in a slash.) +// +// A valid path element is a non-empty string made up of +// ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~. +// It must not begin or end with a dot (U+002E), nor contain two dots in a row. +// +// The element prefix up to the first dot must not be a reserved file name +// on Windows, regardless of case (CON, com1, NuL, and so on). +// +// CheckImportPath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. func CheckImportPath(path string) error { if err := checkPath(path, false); err != nil { return fmt.Errorf("malformed import path %q: %v", path, err) @@ -169,8 +342,8 @@ func checkPath(path string, fileName bool) error { if path == "" { return fmt.Errorf("empty string") } - if strings.Contains(path, "..") { - return fmt.Errorf("double dot") + if path[0] == '-' { + return fmt.Errorf("leading dash") } if strings.Contains(path, "//") { return fmt.Errorf("double slash") @@ -226,13 +399,24 @@ func checkElem(elem string, fileName bool) error { } for _, bad := range badWindowsNames { if strings.EqualFold(bad, short) { - return fmt.Errorf("disallowed path element %q", elem) + return fmt.Errorf("%q disallowed as path element component on Windows", short) } } return nil } -// CheckFilePath checks whether a slash-separated file path is valid. +// CheckFilePath checks that a slash-separated file path is valid. 
+// The definition of a valid file path is the same as the definition +// of a valid import path except that the set of allowed characters is larger: +// all Unicode letters, ASCII digits, the ASCII space character (U+0020), +// and the ASCII punctuation characters +// “!#$%&()+,-.=@[]^_{}~”. +// (The excluded punctuation characters, " * < > ? ` ' | / \ and :, +// have special meanings in certain shells or operating systems.) +// +// CheckFilePath may be less restrictive in the future, but see the +// top-level package documentation for additional information about +// subtleties of Unicode. func CheckFilePath(path string) error { if err := checkPath(path, true); err != nil { return fmt.Errorf("malformed file path %q: %v", path, err) @@ -271,6 +455,9 @@ var badWindowsNames = []string{ // and version is either empty or "/vN" for N >= 2. // As a special case, gopkg.in paths are recognized directly; // they require ".vN" instead of "/vN", and for all N, not just N >= 2. +// SplitPathVersion returns with ok = false when presented with +// a path whose last path element does not satisfy the constraints +// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2". func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) { if strings.HasPrefix(path, "gopkg.in/") { return splitGopkgIn(path) @@ -319,20 +506,65 @@ func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) { // MatchPathMajor reports whether the semantic version v // matches the path major version pathMajor. +// +// MatchPathMajor returns true if and only if CheckPathMajor returns nil. func MatchPathMajor(v, pathMajor string) bool { + return CheckPathMajor(v, pathMajor) == nil +} + +// CheckPathMajor returns a non-nil error if the semantic version v +// does not match the path major version pathMajor. +func CheckPathMajor(v, pathMajor string) error { + // TODO(jayconrod): return errors or panic for invalid inputs. This function + // (and others) was covered by integration tests for cmd/go, and surrounding + // code protected against invalid inputs like non-canonical versions. if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { pathMajor = strings.TrimSuffix(pathMajor, "-unstable") } if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" { // Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1. // For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405. - return true + return nil } m := semver.Major(v) if pathMajor == "" { - return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" + if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" { + return nil + } + pathMajor = "v0 or v1" + } else if pathMajor[0] == '/' || pathMajor[0] == '.' { + if m == pathMajor[1:] { + return nil + } + pathMajor = pathMajor[1:] + } + return &InvalidVersionError{ + Version: v, + Err: fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)), } - return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:] +} + +// PathMajorPrefix returns the major-version tag prefix implied by pathMajor. +// An empty PathMajorPrefix allows either v0 or v1. +// +// Note that MatchPathMajor may accept some versions that do not actually begin +// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1' +// pathMajor, even though that pathMajor implies 'v1' tagging. 
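
To make the major-version matching above concrete, a few illustrative calls (the expected results follow from the logic shown; the error wording is approximate):

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	fmt.Println(module.MatchPathMajor("v2.3.4", "/v2")) // true
	fmt.Println(module.MatchPathMajor("v1.5.2", ""))    // true: a bare path allows v0/v1
	fmt.Println(module.MatchPathMajor("v2.3.4", ""))    // false: v2 requires a /v2 path

	// CheckPathMajor reports the same decision as an error value, e.g.
	// version "v1.0.0" invalid: should be v2, not v1
	fmt.Println(module.CheckPathMajor("v1.0.0", "/v2"))
}
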
+func PathMajorPrefix(pathMajor string) string { + if pathMajor == "" { + return "" + } + if pathMajor[0] != '/' && pathMajor[0] != '.' { + panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator") + } + if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") { + pathMajor = strings.TrimSuffix(pathMajor, "-unstable") + } + m := pathMajor[1:] + if m != semver.Major(m) { + panic("pathMajor suffix " + pathMajor + "passed to PathMajorPrefix is not a valid major version") + } + return m } // CanonicalVersion returns the canonical form of the version string v. @@ -345,7 +577,10 @@ func CanonicalVersion(v string) string { return cv } -// Sort sorts the list by Path, breaking ties by comparing Versions. +// Sort sorts the list by Path, breaking ties by comparing Version fields. +// The Version fields are interpreted as semantic versions (using semver.Compare) +// optionally followed by a tie-breaking suffix introduced by a slash character, +// like in "v0.0.1/go.mod". func Sort(list []Version) { sort.Slice(list, func(i, j int) bool { mi := list[i] @@ -372,93 +607,36 @@ func Sort(list []Version) { }) } -// Safe encodings -// -// Module paths appear as substrings of file system paths -// (in the download cache) and of web server URLs in the proxy protocol. -// In general we cannot rely on file systems to be case-sensitive, -// nor can we rely on web servers, since they read from file systems. -// That is, we cannot rely on the file system to keep rsc.io/QUOTE -// and rsc.io/quote separate. Windows and macOS don't. -// Instead, we must never require two different casings of a file path. -// Because we want the download cache to match the proxy protocol, -// and because we want the proxy protocol to be possible to serve -// from a tree of static files (which might be stored on a case-insensitive -// file system), the proxy protocol must never require two different casings -// of a URL path either. -// -// One possibility would be to make the safe encoding be the lowercase -// hexadecimal encoding of the actual path bytes. This would avoid ever -// needing different casings of a file path, but it would be fairly illegible -// to most programmers when those paths appeared in the file system -// (including in file paths in compiler errors and stack traces) -// in web server logs, and so on. Instead, we want a safe encoding that -// leaves most paths unaltered. -// -// The safe encoding is this: -// replace every uppercase letter with an exclamation mark -// followed by the letter's lowercase equivalent. -// -// For example, -// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go. -// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy -// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus. -// -// Import paths that avoid upper-case letters are left unchanged. -// Note that because import paths are ASCII-only and avoid various -// problematic punctuation (like : < and >), the safe encoding is also ASCII-only -// and avoids the same problematic punctuation. -// -// Import paths have never allowed exclamation marks, so there is no -// need to define how to encode a literal !. -// -// Although paths are disallowed from using Unicode (see pathOK above), -// the eventual plan is to allow Unicode letters as well, to assume that -// file systems and URLs are Unicode-safe (storing UTF-8), and apply -// the !-for-uppercase convention. 
Note however that not all runes that -// are different but case-fold equivalent are an upper/lower pair. -// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin) -// are considered to case-fold to each other. When we do add Unicode -// letters, we must not assume that upper/lower are the only case-equivalent pairs. -// Perhaps the Kelvin symbol would be disallowed entirely, for example. -// Or perhaps it would encode as "!!k", or perhaps as "(212A)". -// -// Also, it would be nice to allow Unicode marks as well as letters, -// but marks include combining marks, and then we must deal not -// only with case folding but also normalization: both U+00E9 ('é') -// and U+0065 U+0301 ('e' followed by combining acute accent) -// look the same on the page and are treated by some file systems -// as the same path. If we do allow Unicode marks in paths, there -// must be some kind of normalization to allow only one canonical -// encoding of any character used in an import path. - -// EncodePath returns the safe encoding of the given module path. +// EscapePath returns the escaped form of the given module path. // It fails if the module path is invalid. -func EncodePath(path string) (encoding string, err error) { +func EscapePath(path string) (escaped string, err error) { if err := CheckPath(path); err != nil { return "", err } - return encodeString(path) + return escapeString(path) } -// EncodeVersion returns the safe encoding of the given module version. +// EscapeVersion returns the escaped form of the given module version. // Versions are allowed to be in non-semver form but must be valid file names // and not contain exclamation marks. -func EncodeVersion(v string) (encoding string, err error) { +func EscapeVersion(v string) (escaped string, err error) { if err := checkElem(v, true); err != nil || strings.Contains(v, "!") { - return "", fmt.Errorf("disallowed version string %q", v) + return "", &InvalidVersionError{ + Version: v, + Err: fmt.Errorf("disallowed version string"), + } } - return encodeString(v) + return escapeString(v) } -func encodeString(s string) (encoding string, err error) { +func escapeString(s string) (escaped string, err error) { haveUpper := false for _, r := range s { if r == '!' || r >= utf8.RuneSelf { // This should be disallowed by CheckPath, but diagnose anyway. - // The correctness of the encoding loop below depends on it. - return "", fmt.Errorf("internal error: inconsistency in EncodePath") + // The correctness of the escaping loop below depends on it. + return "", fmt.Errorf("internal error: inconsistency in EscapePath") } if 'A' <= r && r <= 'Z' { haveUpper = true @@ -480,39 +658,39 @@ func encodeString(s string) (encoding string, err error) { return string(buf), nil } -// DecodePath returns the module path of the given safe encoding. -// It fails if the encoding is invalid or encodes an invalid path. -func DecodePath(encoding string) (path string, err error) { - path, ok := decodeString(encoding) +// UnescapePath returns the module path for the given escaped path. +// It fails if the escaped path is invalid or describes an invalid path. 
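
A minimal round-trip sketch of the renamed escaping API (the paths are examples only):

package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	esc, err := module.EscapePath("github.com/Azure/azure-sdk-for-go")
	if err != nil {
		panic(err)
	}
	fmt.Println(esc) // github.com/!azure/azure-sdk-for-go

	path, err := module.UnescapePath(esc)
	if err != nil {
		panic(err)
	}
	fmt.Println(path == "github.com/Azure/azure-sdk-for-go") // true: lossless round trip
}
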
+func UnescapePath(escaped string) (path string, err error) { + path, ok := unescapeString(escaped) if !ok { - return "", fmt.Errorf("invalid module path encoding %q", encoding) + return "", fmt.Errorf("invalid escaped module path %q", escaped) } if err := CheckPath(path); err != nil { - return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err) + return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err) } return path, nil } -// DecodeVersion returns the version string for the given safe encoding. -// It fails if the encoding is invalid or encodes an invalid version. +// UnescapeVersion returns the version string for the given escaped version. +// It fails if the escaped form is invalid or describes an invalid version. // Versions are allowed to be in non-semver form but must be valid file names // and not contain exclamation marks. -func DecodeVersion(encoding string) (v string, err error) { - v, ok := decodeString(encoding) +func UnescapeVersion(escaped string) (v string, err error) { + v, ok := unescapeString(escaped) if !ok { - return "", fmt.Errorf("invalid version encoding %q", encoding) + return "", fmt.Errorf("invalid escaped version %q", escaped) } if err := checkElem(v, true); err != nil { - return "", fmt.Errorf("disallowed version string %q", v) + return "", fmt.Errorf("invalid escaped version %q: %v", v, err) } return v, nil } -func decodeString(encoding string) (string, bool) { +func unescapeString(escaped string) (string, bool) { var buf []byte bang := false - for _, r := range encoding { + for _, r := range escaped { if r >= utf8.RuneSelf { return "", false } diff --git a/vendor/golang.org/x/tools/internal/semver/semver.go b/vendor/golang.org/x/mod/semver/semver.go similarity index 99% rename from vendor/golang.org/x/tools/internal/semver/semver.go rename to vendor/golang.org/x/mod/semver/semver.go index 4af7118e55d2e..2988e3cf9c507 100644 --- a/vendor/golang.org/x/tools/internal/semver/semver.go +++ b/vendor/golang.org/x/mod/semver/semver.go @@ -107,7 +107,7 @@ func Build(v string) string { } // Compare returns an integer comparing two versions according to -// according to semantic version precedence. +// semantic version precedence. // The result will be 0 if v == w, -1 if v < w, or +1 if v > w. // // An invalid semantic version string is considered less than a valid one. @@ -263,7 +263,7 @@ func parseBuild(v string) (t, rest string, ok bool) { i := 1 start := 1 for i < len(v) { - if !isIdentChar(v[i]) { + if !isIdentChar(v[i]) && v[i] != '.' { return } if v[i] == '.' { diff --git a/vendor/golang.org/x/tools/cmd/goimports/goimports.go b/vendor/golang.org/x/tools/cmd/goimports/goimports.go index a476a7f3c30a7..a5680d8d6c842 100644 --- a/vendor/golang.org/x/tools/cmd/goimports/goimports.go +++ b/vendor/golang.org/x/tools/cmd/goimports/goimports.go @@ -45,8 +45,12 @@ var ( Fragment: true, // This environment, and its caches, will be reused for the whole run. 
Env: &imports.ProcessEnv{ - GOPATH: build.Default.GOPATH, - GOROOT: build.Default.GOROOT, + GOPATH: build.Default.GOPATH, + GOROOT: build.Default.GOROOT, + GOFLAGS: os.Getenv("GOFLAGS"), + GO111MODULE: os.Getenv("GO111MODULE"), + GOPROXY: os.Getenv("GOPROXY"), + GOSUMDB: os.Getenv("GOSUMDB"), }, } exitCode = 0 @@ -258,7 +262,7 @@ func gofmtMain() { if verbose { log.SetFlags(log.LstdFlags | log.Lmicroseconds) - options.Env.Debug = true + options.Env.Logf = log.Printf } if options.TabWidth < 0 { fmt.Fprintf(os.Stderr, "negative tabwidth %d\n", options.TabWidth) diff --git a/vendor/golang.org/x/tools/go/analysis/doc.go b/vendor/golang.org/x/tools/go/analysis/doc.go index a2353fc88b9c6..ea56b724e8b4d 100644 --- a/vendor/golang.org/x/tools/go/analysis/doc.go +++ b/vendor/golang.org/x/tools/go/analysis/doc.go @@ -1,8 +1,9 @@ /* -The analysis package defines the interface between a modular static +Package analysis defines the interface between a modular static analysis and an analysis driver program. + Background A static analysis is a function that inspects a package of Go code and @@ -41,9 +42,9 @@ the go/analysis/passes/ subdirectory: package unusedresult var Analyzer = &analysis.Analyzer{ - Name: "unusedresult", - Doc: "check for unused results of calls to some functions", - Run: run, + Name: "unusedresult", + Doc: "check for unused results of calls to some functions", + Run: run, ... } @@ -51,7 +52,6 @@ the go/analysis/passes/ subdirectory: ... } - An analysis driver is a program such as vet that runs a set of analyses and prints the diagnostics that they report. The driver program must import the list of Analyzers it needs. @@ -70,51 +70,18 @@ A driver may use the name, flags, and documentation to provide on-line help that describes the analyses it performs. The doc comment contains a brief one-line summary, optionally followed by paragraphs of explanation. -The vet command, shown below, is an example of a driver that runs -multiple analyzers. It is based on the multichecker package -(see the "Standalone commands" section for details). - - $ go build golang.org/x/tools/go/analysis/cmd/vet - $ ./vet help - vet is a tool for static analysis of Go programs. - - Usage: vet [-flag] [package] - - Registered analyzers: - - asmdecl report mismatches between assembly files and Go declarations - assign check for useless assignments - atomic check for common mistakes using the sync/atomic package - ... - unusedresult check for unused results of calls to some functions - - $ ./vet help unusedresult - unusedresult: check for unused results of calls to some functions - - Analyzer flags: - - -unusedresult.funcs value - comma-separated list of functions whose results must be used (default Error,String) - -unusedresult.stringmethods value - comma-separated list of names of methods of type func() string whose results must be used - - Some functions like fmt.Errorf return a result and have no side effects, - so it is always a mistake to discard the result. This analyzer reports - calls to certain functions in which the result of the call is ignored. - - The set of functions may be controlled using flags. 
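
For orientation, a minimal Analyzer matching the shape documented here; the name and body are illustrative only:

package noopcheck

import "golang.org/x/tools/go/analysis"

// Analyzer is a skeleton that walks the files and reports nothing.
var Analyzer = &analysis.Analyzer{
	Name: "noopcheck",
	Doc:  "skeleton analyzer that inspects files and reports nothing",
	Run: func(pass *analysis.Pass) (interface{}, error) {
		for _, f := range pass.Files {
			_ = f // real analyzers inspect the AST here and report via pass.Report
		}
		return nil, nil
	},
}
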
The Analyzer type has more fields besides those shown above: type Analyzer struct { - Name string - Doc string - Flags flag.FlagSet - Run func(*Pass) (interface{}, error) - RunDespiteErrors bool - ResultType reflect.Type - Requires []*Analyzer - FactTypes []Fact + Name string + Doc string + Flags flag.FlagSet + Run func(*Pass) (interface{}, error) + RunDespiteErrors bool + ResultType reflect.Type + Requires []*Analyzer + FactTypes []Fact } The Flags field declares a set of named (global) flag variables that @@ -154,13 +121,13 @@ package being analyzed, and provides operations to the Run function for reporting diagnostics and other information back to the driver. type Pass struct { - Fset *token.FileSet - Files []*ast.File - OtherFiles []string - Pkg *types.Package - TypesInfo *types.Info - ResultOf map[*Analyzer]interface{} - Report func(Diagnostic) + Fset *token.FileSet + Files []*ast.File + OtherFiles []string + Pkg *types.Package + TypesInfo *types.Info + ResultOf map[*Analyzer]interface{} + Report func(Diagnostic) ... } @@ -245,7 +212,7 @@ package. An Analyzer that uses facts must declare their types: var Analyzer = &analysis.Analyzer{ - Name: "printf", + Name: "printf", FactTypes: []analysis.Fact{new(isWrapper)}, ... } @@ -330,7 +297,5 @@ entirety as: A tool that provides multiple analyzers can use multichecker in a similar way, giving it the list of Analyzers. - - */ package analysis diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index 3e4b195368b35..2087ceec9cfd6 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -275,9 +275,10 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del // We deleted an entry but now there may be // a blank line-sized hole where the import was. - if line-lastLine > 1 { + if line-lastLine > 1 || !gen.Rparen.IsValid() { // There was a blank line immediately preceding the deleted import, - // so there's no need to close the hole. + // so there's no need to close the hole. The right parenthesis is + // invalid after AddImport to an import statement without parenthesis. // Do nothing. } else if line != fset.File(gen.Rparen).LineCount() { // There was no blank line. Close the hole. diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go index ddbdd3f08fc2e..3084508b5f813 100644 --- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -90,7 +90,7 @@ func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { // The types argument, if non-empty, enables type-based filtering of // events. The function f if is called only for nodes whose type // matches an element of the types slice. -func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (prune bool)) { +func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) { mask := maskOf(types) for i := 0; i < len(in.events); { ev := in.events[i] @@ -114,7 +114,7 @@ func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (prun // supplies each call to f an additional argument, the current // traversal stack. The stack's first element is the outermost node, // an *ast.File; its last is the innermost, n. 
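
With the result renamed from `prune` to `proceed`, the contract reads: return true to descend, false to skip the subtree. A self-contained sketch under that reading (the source string is made up):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/inspector"
)

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "x.go", "package x\nfunc Hello() {}\n", 0)
	if err != nil {
		panic(err)
	}
	insp := inspector.New([]*ast.File{f})
	insp.Nodes([]ast.Node{(*ast.FuncDecl)(nil)}, func(n ast.Node, push bool) (proceed bool) {
		if push {
			fmt.Println("func:", n.(*ast.FuncDecl).Name.Name)
		}
		return true // true = keep descending; false would skip this subtree
	})
}
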
-func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (prune bool)) { +func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { mask := maskOf(types) var stack []ast.Node for i := 0; i < len(in.events); { diff --git a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go index 9cf186605f6eb..8dcd8bbb71a03 100644 --- a/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go @@ -344,7 +344,7 @@ func (p *parser) expectKeyword(keyword string) { // PackageId = string_lit . // -func (p *parser) parsePackageId() string { +func (p *parser) parsePackageID() string { id, err := strconv.Unquote(p.expect(scanner.String)) if err != nil { p.error(err) @@ -384,7 +384,7 @@ func (p *parser) parseDotIdent() string { // func (p *parser) parseQualifiedName() (id, name string) { p.expect('@') - id = p.parsePackageId() + id = p.parsePackageID() p.expect('.') // Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields. if p.tok == '?' { @@ -696,7 +696,7 @@ func (p *parser) parseInterfaceType(parent *types.Package) types.Type { // Complete requires the type's embedded interfaces to be fully defined, // but we do not define any - return types.NewInterface(methods, nil).Complete() + return newInterface(methods, nil).Complete() } // ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type . @@ -785,7 +785,7 @@ func (p *parser) parseType(parent *types.Package) types.Type { func (p *parser) parseImportDecl() { p.expectKeyword("import") name := p.parsePackageName() - p.getPkg(p.parsePackageId(), name) + p.getPkg(p.parsePackageID(), name) } // int_lit = [ "+" | "-" ] { "0" ... "9" } . diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go index db0c9a7ea610b..5ee692d383313 100644 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -11,11 +11,10 @@ import ( "encoding/json" "fmt" "go/types" - "log" - "os" "os/exec" "strings" - "time" + + "golang.org/x/tools/internal/gocommand" ) var debug = false @@ -78,97 +77,42 @@ func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExp } func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) { - args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"} - args = append(args, buildFlags...) - args = append(args, "--", "unsafe") - stdout, stderr, err := invokeGo(ctx, env, dir, usesExportData, args...) + inv := gocommand.Invocation{ + Verb: "list", + Args: []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"}, + Env: env, + BuildFlags: buildFlags, + WorkingDir: dir, + } + stdout, stderr, friendlyErr, rawErr := inv.RunRaw(ctx) var goarch, compiler string - if err != nil { - if strings.Contains(err.Error(), "cannot find main module") { + if rawErr != nil { + if strings.Contains(rawErr.Error(), "cannot find main module") { // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. // TODO(matloob): Is this a problem in practice? 
- envout, _, enverr := invokeGo(ctx, env, dir, usesExportData, "env", "GOARCH") + inv := gocommand.Invocation{ + Verb: "env", + Args: []string{"GOARCH"}, + Env: env, + WorkingDir: dir, + } + envout, enverr := inv.Run(ctx) if enverr != nil { - return nil, err + return nil, enverr } goarch = strings.TrimSpace(envout.String()) compiler = "gc" } else { - return nil, err + return nil, friendlyErr } } else { fields := strings.Fields(stdout.String()) if len(fields) < 2 { - return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \" \" from stdout of go command:\n%s\ndir: %s\nstdout: <<%s>>\nstderr: <<%s>>", - cmdDebugStr(env, args...), dir, stdout.String(), stderr.String()) + return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>", + stdout.String(), stderr.String()) } goarch = fields[0] compiler = fields[1] } return types.SizesFor(compiler, goarch), nil } - -// invokeGo returns the stdout and stderr of a go command invocation. -func invokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, *bytes.Buffer, error) { - if debug { - defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now()) - } - stdout := new(bytes.Buffer) - stderr := new(bytes.Buffer) - cmd := exec.CommandContext(ctx, "go", args...) - // On darwin the cwd gets resolved to the real path, which breaks anything that - // expects the working directory to keep the original path, including the - // go command when dealing with modules. - // The Go stdlib has a special feature where if the cwd and the PWD are the - // same node then it trusts the PWD, so by setting it in the env for the child - // process we fix up all the paths returned by the go command. - cmd.Env = append(append([]string{}, env...), "PWD="+dir) - cmd.Dir = dir - cmd.Stdout = stdout - cmd.Stderr = stderr - if err := cmd.Run(); err != nil { - exitErr, ok := err.(*exec.ExitError) - if !ok { - // Catastrophic error: - // - executable not found - // - context cancellation - return nil, nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) - } - - // Export mode entails a build. - // If that build fails, errors appear on stderr - // (despite the -e flag) and the Export field is blank. - // Do not fail in that case. - if !usesExportData { - return nil, nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) - } - } - - // As of writing, go list -export prints some non-fatal compilation - // errors to stderr, even with -e set. We would prefer that it put - // them in the Package.Error JSON (see https://golang.org/issue/26319). - // In the meantime, there's nowhere good to put them, but they can - // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS - // is set. 
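
The deleted helper below is replaced by the gocommand.Invocation pattern used above; a sketch of the same GOARCH query in that style (gocommand is internal to x/tools, so this only builds inside that module; the values are placeholders):

inv := gocommand.Invocation{
	Verb:       "env",
	Args:       []string{"GOARCH"},
	Env:        os.Environ(),
	WorkingDir: ".",
}
stdout, err := inv.Run(context.Background()) // (*bytes.Buffer, error)
if err != nil {
	panic(err)
}
fmt.Println(strings.TrimSpace(stdout.String()))
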
- if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { - fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(env, args...), stderr) - } - - // debugging - if false { - fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout) - } - - return stdout, stderr, nil -} - -func cmdDebugStr(envlist []string, args ...string) string { - env := make(map[string]string) - for _, kv := range envlist { - split := strings.Split(kv, "=") - k, v := split[0], split[1] - env[k] = v - } - - return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args) -} diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go index 3799f8ed8be18..4bfe28a51ff52 100644 --- a/vendor/golang.org/x/tools/go/packages/doc.go +++ b/vendor/golang.org/x/tools/go/packages/doc.go @@ -60,8 +60,7 @@ causes Load to run in LoadFiles mode, collecting minimal information. See the documentation for type Config for details. As noted earlier, the Config.Mode controls the amount of detail -reported about the loaded packages, with each mode returning all the data of the -previous mode with some extra added. See the documentation for type LoadMode +reported about the loaded packages. See the documentation for type LoadMode for details. Most tools should pass their command-line arguments (after any flags) diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go index 6ac3e4f5b57d1..8c8473fd0bd26 100644 --- a/vendor/golang.org/x/tools/go/packages/external.go +++ b/vendor/golang.org/x/tools/go/packages/external.go @@ -84,13 +84,14 @@ func findExternalDriver(cfg *Config) driver { cmd.Stdin = bytes.NewReader(req) cmd.Stdout = buf cmd.Stderr = stderr - if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { - fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr) - } if err := cmd.Run(); err != nil { return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr) } + if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" { + fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr) + } + var response driverResponse if err := json.Unmarshal(buf.Bytes(), &response); err != nil { return nil, err diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 648e364313acc..b4a13ef454514 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -6,27 +6,25 @@ package packages import ( "bytes" + "context" "encoding/json" "fmt" "go/types" - "io/ioutil" "log" "os" "os/exec" "path" "path/filepath" "reflect" - "regexp" + "sort" "strconv" "strings" "sync" - "time" "unicode" "golang.org/x/tools/go/internal/packagesdriver" - "golang.org/x/tools/internal/gopathwalk" - "golang.org/x/tools/internal/semver" - "golang.org/x/tools/internal/span" + "golang.org/x/tools/internal/gocommand" + "golang.org/x/tools/internal/packagesinternal" ) // debug controls verbose logging. @@ -45,16 +43,21 @@ type responseDeduper struct { dr *driverResponse } -// init fills in r with a driverResponse. 
-func (r *responseDeduper) init(dr *driverResponse) { - r.dr = dr - r.seenRoots = map[string]bool{} - r.seenPackages = map[string]*Package{} +func newDeduper() *responseDeduper { + return &responseDeduper{ + dr: &driverResponse{}, + seenRoots: map[string]bool{}, + seenPackages: map[string]*Package{}, + } +} + +// addAll fills in r with a driverResponse. +func (r *responseDeduper) addAll(dr *driverResponse) { for _, pkg := range dr.Packages { - r.seenPackages[pkg.ID] = pkg + r.addPackage(pkg) } for _, root := range dr.Roots { - r.seenRoots[root] = true + r.addRoot(root) } } @@ -74,25 +77,47 @@ func (r *responseDeduper) addRoot(id string) { r.dr.Roots = append(r.dr.Roots, id) } -// goInfo contains global information from the go tool. -type goInfo struct { - rootDirs map[string]string - env goEnv +type golistState struct { + cfg *Config + ctx context.Context + + envOnce sync.Once + goEnvError error + goEnv map[string]string + + rootsOnce sync.Once + rootDirsError error + rootDirs map[string]string + + // vendorDirs caches the (non)existence of vendor directories. + vendorDirs map[string]bool } -type goEnv struct { - modulesOn bool +// getEnv returns Go environment variables. Only specific variables are +// populated -- computing all of them is slow. +func (state *golistState) getEnv() (map[string]string, error) { + state.envOnce.Do(func() { + var b *bytes.Buffer + b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH") + if state.goEnvError != nil { + return + } + + state.goEnv = make(map[string]string) + decoder := json.NewDecoder(b) + if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil { + return + } + }) + return state.goEnv, state.goEnvError } -func determineEnv(cfg *Config) goEnv { - buf, err := invokeGo(cfg, "env", "GOMOD") +// mustGetEnv is a convenience function that can be used if getEnv has already succeeded. +func (state *golistState) mustGetEnv() map[string]string { + env, err := state.getEnv() if err != nil { - return goEnv{} + panic(fmt.Sprintf("mustGetEnv: %v", err)) } - gomod := bytes.TrimSpace(buf.Bytes()) - - env := goEnv{} - env.modulesOn = len(gomod) > 0 return env } @@ -100,47 +125,38 @@ func determineEnv(cfg *Config) goEnv { // the build system package structure. // See driver for more details. func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { - var sizes types.Sizes + // Make sure that any asynchronous go commands are killed when we return. + parentCtx := cfg.Context + if parentCtx == nil { + parentCtx = context.Background() + } + ctx, cancel := context.WithCancel(parentCtx) + defer cancel() + + response := newDeduper() + + // Fill in response.Sizes asynchronously if necessary. var sizeserr error var sizeswg sync.WaitGroup if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { sizeswg.Add(1) go func() { - sizes, sizeserr = getSizes(cfg) + var sizes types.Sizes + sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg)) + // types.SizesFor always returns nil or a *types.StdSizes. 
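The getEnv method above is a compute-once cache: a sync.Once guards a single `go env -json` invocation, and the error is cached alongside the value so every caller sees the same outcome. A minimal standalone sketch of the same idiom (not part of the patch; envCache and get are invented names):

    package main

    import (
    	"bytes"
    	"encoding/json"
    	"fmt"
    	"os/exec"
    	"sync"
    )

    // envCache runs `go env -json` at most once and caches both the
    // decoded result and any error, mirroring golistState.getEnv.
    type envCache struct {
    	once sync.Once
    	env  map[string]string
    	err  error
    }

    func (c *envCache) get() (map[string]string, error) {
    	c.once.Do(func() {
    		out, err := exec.Command("go", "env", "-json", "GOMOD", "GOPATH").Output()
    		if err != nil {
    			c.err = err
    			return
    		}
    		c.env = make(map[string]string)
    		c.err = json.NewDecoder(bytes.NewReader(out)).Decode(&c.env)
    	})
    	return c.env, c.err
    }

    func main() {
    	var c envCache
    	env, err := c.get() // later calls return the cached map without re-running go
    	fmt.Println(env["GOMOD"], err)
    }
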
+ response.dr.Sizes, _ = sizes.(*types.StdSizes) sizeswg.Done() }() } - defer sizeswg.Wait() - - // start fetching rootDirs - var info goInfo - var rootDirsReady, envReady = make(chan struct{}), make(chan struct{}) - go func() { - info.rootDirs = determineRootDirs(cfg) - close(rootDirsReady) - }() - go func() { - info.env = determineEnv(cfg) - close(envReady) - }() - getGoInfo := func() *goInfo { - <-rootDirsReady - <-envReady - return &info - } - - // Ensure that we don't leak goroutines: Load is synchronous, so callers will - // not expect it to access the fields of cfg after the call returns. - defer getGoInfo() - // always pass getGoInfo to golistDriver - golistDriver := func(cfg *Config, patterns ...string) (*driverResponse, error) { - return golistDriver(cfg, getGoInfo, patterns...) + state := &golistState{ + cfg: cfg, + ctx: ctx, + vendorDirs: map[string]bool{}, } // Determine files requested in contains patterns var containFiles []string - var packagesNamed []string restPatterns := make([]string, 0, len(patterns)) // Extract file= and other [querytype]= patterns. Report an error if querytype // doesn't exist. @@ -156,8 +172,6 @@ extractQueries: containFiles = append(containFiles, value) case "pattern": restPatterns = append(restPatterns, value) - case "iamashamedtousethedisabledqueryname": - packagesNamed = append(packagesNamed, value) case "": // not a reserved query restPatterns = append(restPatterns, pattern) default: @@ -173,52 +187,34 @@ extractQueries: } } - response := &responseDeduper{} - var err error - // See if we have any patterns to pass through to go list. Zero initial // patterns also requires a go list call, since it's the equivalent of // ".". if len(restPatterns) > 0 || len(patterns) == 0 { - dr, err := golistDriver(cfg, restPatterns...) + dr, err := state.createDriverResponse(restPatterns...) if err != nil { return nil, err } - response.init(dr) - } else { - response.init(&driverResponse{}) - } - - sizeswg.Wait() - if sizeserr != nil { - return nil, sizeserr + response.addAll(dr) } - // types.SizesFor always returns nil or a *types.StdSizes - response.dr.Sizes, _ = sizes.(*types.StdSizes) - - var containsCandidates []string if len(containFiles) != 0 { - if err := runContainsQueries(cfg, golistDriver, response, containFiles, getGoInfo); err != nil { - return nil, err - } - } - - if len(packagesNamed) != 0 { - if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil { + if err := state.runContainsQueries(response, containFiles); err != nil { return nil, err } } - modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) + modifiedPkgs, needPkgs, err := state.processGolistOverlay(response) if err != nil { return nil, err } + + var containsCandidates []string if len(containFiles) > 0 { containsCandidates = append(containsCandidates, modifiedPkgs...) containsCandidates = append(containsCandidates, needPkgs...) } - if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs, getGoInfo); err != nil { + if err := state.addNeededOverlayPackages(response, needPkgs); err != nil { return nil, err } // Check candidate packages for containFiles. 
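The rewritten goListDriver above derives a cancellable context, so any go commands still running in the background are torn down when Load returns, and it computes type sizes on a side goroutine that is joined only at the end. A self-contained sketch of that shape, where slowQuery is a made-up stand-in for the sizes query:

    package main

    import (
    	"context"
    	"fmt"
    	"sync"
    	"time"
    )

    // load mirrors the shape of goListDriver: derive a cancellable
    // context so background work dies when we return, run a side
    // computation concurrently, and join it at the end.
    func load(parent context.Context) (string, error) {
    	if parent == nil {
    		parent = context.Background()
    	}
    	ctx, cancel := context.WithCancel(parent)
    	defer cancel() // tears down the background query if we exit early

    	var sizes string
    	var sizesErr error
    	var wg sync.WaitGroup
    	wg.Add(1)
    	go func() {
    		defer wg.Done()
    		sizes, sizesErr = slowQuery(ctx) // the sizes query in the real code
    	}()

    	// ... the main go list work would happen here ...

    	wg.Wait()
    	if sizesErr != nil {
    		return "", sizesErr
    	}
    	return sizes, nil
    }

    func slowQuery(ctx context.Context) (string, error) {
    	select {
    	case <-time.After(10 * time.Millisecond):
    		return "amd64", nil
    	case <-ctx.Done():
    		return "", ctx.Err()
    	}
    }

    func main() {
    	got, err := load(nil)
    	fmt.Println(got, err)
    }
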
@@ -247,33 +243,32 @@ extractQueries: } } + sizeswg.Wait() + if sizeserr != nil { + return nil, sizeserr + } return response.dr, nil } -func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string, getGoInfo func() *goInfo) error { +func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error { if len(pkgs) == 0 { return nil } - drivercfg := *cfg - if getGoInfo().env.modulesOn { - drivercfg.BuildFlags = append(drivercfg.BuildFlags, "-mod=readonly") - } - dr, err := driver(&drivercfg, pkgs...) - + dr, err := state.createDriverResponse(pkgs...) if err != nil { return err } for _, pkg := range dr.Packages { response.addPackage(pkg) } - _, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) + _, needPkgs, err := state.processGolistOverlay(response) if err != nil { return err } - return addNeededOverlayPackages(cfg, driver, response, needPkgs, getGoInfo) + return state.addNeededOverlayPackages(response, needPkgs) } -func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string, goInfo func() *goInfo) error { +func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. fdir := filepath.Dir(query) @@ -283,42 +278,16 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q if err != nil { return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) } - dirResponse, err := driver(cfg, pattern) - if err != nil { - var queryErr error - if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil { - return err // return the original error - } - } - // `go list` can report errors for files that are not listed as part of a package's GoFiles. - // In the case of an invalid Go file, we should assume that it is part of package if only - // one package is in the response. The file may have valid contents in an overlay. - if len(dirResponse.Packages) == 1 { - pkg := dirResponse.Packages[0] - for i, err := range pkg.Errors { - s := errorSpan(err) - if !s.IsValid() { - break - } - if len(pkg.CompiledGoFiles) == 0 { - break - } - dir := filepath.Dir(pkg.CompiledGoFiles[0]) - filename := filepath.Join(dir, filepath.Base(s.URI().Filename())) - if info, err := os.Stat(filename); err != nil || info.IsDir() { - break - } - if !contains(pkg.CompiledGoFiles, filename) { - pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename) - pkg.GoFiles = append(pkg.GoFiles, filename) - pkg.Errors = append(pkg.Errors[:i], pkg.Errors[i+1:]...) - } - } - } - // A final attempt to construct an ad-hoc package. - if len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1 { + dirResponse, err := state.createDriverResponse(pattern) + + // If there was an error loading the package, or the package is returned + // with errors, try to load the file as an ad-hoc package. + // Usually the error will appear in a returned package, but may not if we're + // in module mode and the ad-hoc is located outside a module. 
+ if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 && + len(dirResponse.Packages[0].Errors) == 1 { var queryErr error - if dirResponse, queryErr = adHocPackage(cfg, driver, pattern, query); queryErr != nil { + if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil { return err // return the original error } } @@ -347,345 +316,47 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q return nil } -// adHocPackage attempts to construct an ad-hoc package given a query that failed. -func adHocPackage(cfg *Config, driver driver, pattern, query string) (*driverResponse, error) { - // There was an error loading the package. Try to load the file as an ad-hoc package. - // Usually the error will appear in a returned package, but may not if we're in modules mode - // and the ad-hoc is located outside a module. - dirResponse, err := driver(cfg, query) +// adhocPackage attempts to load or construct an ad-hoc package for a given +// query, if the original call to the driver produced inadequate results. +func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) { + response, err := state.createDriverResponse(query) if err != nil { return nil, err } - // If we get nothing back from `go list`, try to make this file into its own ad-hoc package. - if len(dirResponse.Packages) == 0 && err == nil { - dirResponse.Packages = append(dirResponse.Packages, &Package{ + // If we get nothing back from `go list`, + // try to make this file into its own ad-hoc package. + // TODO(rstambler): Should this check against the original response? + if len(response.Packages) == 0 { + response.Packages = append(response.Packages, &Package{ ID: "command-line-arguments", PkgPath: query, GoFiles: []string{query}, CompiledGoFiles: []string{query}, Imports: make(map[string]*Package), }) - dirResponse.Roots = append(dirResponse.Roots, "command-line-arguments") - } - // Special case to handle issue #33482: - // If this is a file= query for ad-hoc packages where the file only exists on an overlay, - // and exists outside of a module, add the file in for the package. - if len(dirResponse.Packages) == 1 && (dirResponse.Packages[0].ID == "command-line-arguments" || - filepath.ToSlash(dirResponse.Packages[0].PkgPath) == filepath.ToSlash(query)) { - if len(dirResponse.Packages[0].GoFiles) == 0 { - filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath - // TODO(matloob): check if the file is outside of a root dir? - for path := range cfg.Overlay { - if path == filename { - dirResponse.Packages[0].Errors = nil - dirResponse.Packages[0].GoFiles = []string{path} - dirResponse.Packages[0].CompiledGoFiles = []string{path} + response.Roots = append(response.Roots, "command-line-arguments") + } + // Handle special cases. + if len(response.Packages) == 1 { + // golang/go#33482: If this is a file= query for ad-hoc packages where + // the file only exists on an overlay, and exists outside of a module, + // add the file to the package and remove the errors. + if response.Packages[0].ID == "command-line-arguments" || + filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) { + if len(response.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? 
+ for path := range state.cfg.Overlay { + if path == filename { + response.Packages[0].Errors = nil + response.Packages[0].GoFiles = []string{path} + response.Packages[0].CompiledGoFiles = []string{path} + } } } } } - return dirResponse, nil -} - -func contains(files []string, filename string) bool { - for _, f := range files { - if f == filename { - return true - } - } - return false -} - -// errorSpan attempts to parse a standard `go list` error message -// by stripping off the trailing error message. -// -// It works only on errors whose message is prefixed by colon, -// followed by a space (": "). For example: -// -// attributes.go:13:1: expected 'package', found 'type' -// -func errorSpan(err Error) span.Span { - if err.Pos == "" { - input := strings.TrimSpace(err.Msg) - msgIndex := strings.Index(input, ": ") - if msgIndex < 0 { - return span.Parse(input) - } - return span.Parse(input[:msgIndex]) - } - return span.Parse(err.Pos) -} - -// modCacheRegexp splits a path in a module cache into module, module version, and package. -var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) - -func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { - // calling `go env` isn't free; bail out if there's nothing to do. - if len(queries) == 0 { - return nil - } - // Determine which directories are relevant to scan. - roots, modRoot, err := roots(cfg) - if err != nil { - return err - } - - // Scan the selected directories. Simple matches, from GOPATH/GOROOT - // or the local module, can simply be "go list"ed. Matches from the - // module cache need special treatment. - var matchesMu sync.Mutex - var simpleMatches, modCacheMatches []string - add := func(root gopathwalk.Root, dir string) { - // Walk calls this concurrently; protect the result slices. - matchesMu.Lock() - defer matchesMu.Unlock() - - path := dir - if dir != root.Path { - path = dir[len(root.Path)+1:] - } - if pathMatchesQueries(path, queries) { - switch root.Type { - case gopathwalk.RootModuleCache: - modCacheMatches = append(modCacheMatches, path) - case gopathwalk.RootCurrentModule: - // We'd need to read go.mod to find the full - // import path. Relative's easier. - rel, err := filepath.Rel(cfg.Dir, dir) - if err != nil { - // This ought to be impossible, since - // we found dir in the current module. - panic(err) - } - simpleMatches = append(simpleMatches, "./"+rel) - case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT: - simpleMatches = append(simpleMatches, path) - } - } - } - - startWalk := time.Now() - gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug}) - cfg.Logf("%v for walk", time.Since(startWalk)) - - // Weird special case: the top-level package in a module will be in - // whatever directory the user checked the repository out into. It's - // more reasonable for that to not match the package name. So, if there - // are any Go files in the mod root, query it just to be safe. - if modRoot != "" { - rel, err := filepath.Rel(cfg.Dir, modRoot) - if err != nil { - panic(err) // See above. - } - - files, err := ioutil.ReadDir(modRoot) - if err != nil { - panic(err) // See above. 
- } - - for _, f := range files { - if strings.HasSuffix(f.Name(), ".go") { - simpleMatches = append(simpleMatches, rel) - break - } - } - } - - addResponse := func(r *driverResponse) { - for _, pkg := range r.Packages { - response.addPackage(pkg) - for _, name := range queries { - if pkg.Name == name { - response.addRoot(pkg.ID) - break - } - } - } - } - - if len(simpleMatches) != 0 { - resp, err := driver(cfg, simpleMatches...) - if err != nil { - return err - } - addResponse(resp) - } - - // Module cache matches are tricky. We want to avoid downloading new - // versions of things, so we need to use the ones present in the cache. - // go list doesn't accept version specifiers, so we have to write out a - // temporary module, and do the list in that module. - if len(modCacheMatches) != 0 { - // Collect all the matches, deduplicating by major version - // and preferring the newest. - type modInfo struct { - mod string - major string - } - mods := make(map[modInfo]string) - var imports []string - for _, modPath := range modCacheMatches { - matches := modCacheRegexp.FindStringSubmatch(modPath) - mod, ver := filepath.ToSlash(matches[1]), matches[2] - importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3])) - - major := semver.Major(ver) - if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 { - mods[modInfo{mod, major}] = ver - } - - imports = append(imports, importPath) - } - - // Build the temporary module. - var gomod bytes.Buffer - gomod.WriteString("module modquery\nrequire (\n") - for mod, version := range mods { - gomod.WriteString("\t" + mod.mod + " " + version + "\n") - } - gomod.WriteString(")\n") - - tmpCfg := *cfg - - // We're only trying to look at stuff in the module cache, so - // disable the network. This should speed things up, and has - // prevented errors in at least one case, #28518. - tmpCfg.Env = append([]string{"GOPROXY=off"}, cfg.Env...) - - var err error - tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery") - if err != nil { - return err - } - defer os.RemoveAll(tmpCfg.Dir) - - if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil { - return fmt.Errorf("writing go.mod for module cache query: %v", err) - } - - // Run the query, using the import paths calculated from the matches above. - resp, err := driver(&tmpCfg, imports...) - if err != nil { - return fmt.Errorf("querying module cache matches: %v", err) - } - addResponse(resp) - } - - return nil -} - -func getSizes(cfg *Config) (types.Sizes, error) { - return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg)) -} - -// roots selects the appropriate paths to walk based on the passed-in configuration, -// particularly the environment and the presence of a go.mod in cfg.Dir's parents. -func roots(cfg *Config) ([]gopathwalk.Root, string, error) { - stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD") - if err != nil { - return nil, "", err - } - - fields := strings.Split(stdout.String(), "\n") - if len(fields) != 4 || len(fields[3]) != 0 { - return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String()) - } - goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2] - var modDir string - if gomod != "" { - modDir = filepath.Dir(gomod) - } - - var roots []gopathwalk.Root - // Always add GOROOT. 
- roots = append(roots, gopathwalk.Root{ - Path: filepath.Join(goroot, "/src"), - Type: gopathwalk.RootGOROOT, - }) - // If modules are enabled, scan the module dir. - if modDir != "" { - roots = append(roots, gopathwalk.Root{ - Path: modDir, - Type: gopathwalk.RootCurrentModule, - }) - } - // Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode. - for _, p := range gopath { - if modDir != "" { - roots = append(roots, gopathwalk.Root{ - Path: filepath.Join(p, "/pkg/mod"), - Type: gopathwalk.RootModuleCache, - }) - } else { - roots = append(roots, gopathwalk.Root{ - Path: filepath.Join(p, "/src"), - Type: gopathwalk.RootGOPATH, - }) - } - } - - return roots, modDir, nil -} - -// These functions were copied from goimports. See further documentation there. - -// pathMatchesQueries is adapted from pkgIsCandidate. -// TODO: is it reasonable to do Contains here, rather than an exact match on a path component? -func pathMatchesQueries(path string, queries []string) bool { - lastTwo := lastTwoComponents(path) - for _, query := range queries { - if strings.Contains(lastTwo, query) { - return true - } - if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) { - lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) - if strings.Contains(lastTwo, query) { - return true - } - } - } - return false -} - -// lastTwoComponents returns at most the last two path components -// of v, using either / or \ as the path separator. -func lastTwoComponents(v string) string { - nslash := 0 - for i := len(v) - 1; i >= 0; i-- { - if v[i] == '/' || v[i] == '\\' { - nslash++ - if nslash == 2 { - return v[i:] - } - } - } - return v -} - -func hasHyphenOrUpperASCII(s string) bool { - for i := 0; i < len(s); i++ { - b := s[i] - if b == '-' || ('A' <= b && b <= 'Z') { - return true - } - } - return false -} - -func lowerASCIIAndRemoveHyphen(s string) (ret string) { - buf := make([]byte, 0, len(s)) - for i := 0; i < len(s); i++ { - b := s[i] - switch { - case b == '-': - continue - case 'A' <= b && b <= 'Z': - buf = append(buf, b+('a'-'A')) - default: - buf = append(buf, b) - } - } - return string(buf) + return response, nil } // Fields must match go list; @@ -710,6 +381,7 @@ type jsonPackage struct { Imports []string ImportMap map[string]string Deps []string + Module *packagesinternal.Module TestGoFiles []string TestImports []string XTestGoFiles []string @@ -730,10 +402,9 @@ func otherFiles(p *jsonPackage) [][]string { return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} } -// golistDriver uses the "go list" command to expand the pattern -// words and return metadata for the specified packages. dir may be -// "" and env may be nil, as per os/exec.Command. -func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driverResponse, error) { +// createDriverResponse uses the "go list" command to expand the pattern +// words and return a response for the specified packages. +func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -747,11 +418,13 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv // Run "go list" for complete // information on the specified packages. - buf, err := invokeGo(cfg, golistargs(cfg, words)...) + buf, err := state.invokeGo("list", golistargs(state.cfg, words)...) 
if err != nil { return nil, err } seen := make(map[string]*jsonPackage) + pkgs := make(map[string]*Package) + additionalErrors := make(map[string][]Error) // Decode the JSON and convert it to Package form. var response driverResponse for dec := json.NewDecoder(buf); dec.More(); { @@ -782,18 +455,72 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv // contained in a known module or GOPATH entry. This will allow the package to be // properly "reclaimed" when overlays are processed. if filepath.IsAbs(p.ImportPath) && p.Error != nil { - pkgPath, ok := getPkgPath(cfg, p.ImportPath, rootsDirs) + pkgPath, ok, err := state.getPkgPath(p.ImportPath) + if err != nil { + return nil, err + } if ok { p.ImportPath = pkgPath } } if old, found := seen[p.ImportPath]; found { - if !reflect.DeepEqual(p, old) { - return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + // If one version of the package has an error, and the other doesn't, assume + // that this is a case where go list is reporting a fake dependency variant + // of the imported package: When a package tries to invalidly import another + // package, go list emits a variant of the imported package (with the same + // import path, but with an error on it, and the package will have a + // DepError set on it). An example of when this can happen is for imports of + // main packages: main packages can not be imported, but they may be + // separately matched and listed by another pattern. + // See golang.org/issue/36188 for more details. + + // The plan is that eventually, hopefully in Go 1.15, the error will be + // reported on the importing package rather than the duplicate "fake" + // version of the imported package. Once all supported versions of Go + // have the new behavior this logic can be deleted. + // TODO(matloob): delete the workaround logic once all supported versions of + // Go return the errors on the proper package. + + // There should be exactly one version of a package that doesn't have an + // error. + if old.Error == nil && p.Error == nil { + if !reflect.DeepEqual(p, old) { + return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) + } + continue } - // skip the duplicate - continue + + // Determine if this package's error needs to be bubbled up. + // This is a hack, and we expect for go list to eventually set the error + // on the package. + if old.Error != nil { + var errkind string + if strings.Contains(old.Error.Err, "not an importable package") { + errkind = "not an importable package" + } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") { + errkind = "use of internal package not allowed" + } + if errkind != "" { + if len(old.Error.ImportStack) < 2 { + return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack with fewer than two elements`, errkind) + } + importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-2] + additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{ + Pos: old.Error.Pos, + Msg: old.Error.Err, + Kind: ListError, + }) + } + } + + // Make sure that if there's a version of the package without an error, + // that's the one reported to the user. + if old.Error == nil { + continue + } + + // This package will replace the old one at the end of the loop. 
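The long comment above amounts to this: when `go list` prints two variants of the same import path, keep the error-free variant and re-attach the error to the importing package, which go list records second-from-last on the ImportStack. A condensed, hypothetical illustration (listedPkg is a made-up miniature of jsonPackage, and the real code additionally filters by error kind):

    package main

    import "fmt"

    // listedPkg is a made-up miniature of go list's JSON output: just
    // enough structure to show the dedup-and-bubble workaround.
    type listedPkg struct {
    	ImportPath  string
    	Err         string   // set on the "fake" errored variant
    	ImportStack []string // [..., importer, imported]
    }

    func main() {
    	seen := map[string]listedPkg{}
    	bubbled := map[string][]string{} // importing package -> attached errors

    	for _, p := range []listedPkg{
    		{ImportPath: "example.com/cmd"},
    		{
    			ImportPath:  "example.com/cmd",
    			Err:         `import "example.com/cmd": not an importable package`,
    			ImportStack: []string{"example.com/app", "example.com/cmd"},
    		},
    	} {
    		old, dup := seen[p.ImportPath]
    		if !dup {
    			seen[p.ImportPath] = p
    			continue
    		}
    		// Keep whichever variant is error-free, and hang the error off
    		// the importer: the second-from-last import stack entry.
    		errored := p
    		if old.Err != "" {
    			errored, seen[p.ImportPath] = old, p
    		}
    		if errored.Err != "" && len(errored.ImportStack) >= 2 {
    			importer := errored.ImportStack[len(errored.ImportStack)-2]
    			bubbled[importer] = append(bubbled[importer], errored.Err)
    		}
    	}
    	fmt.Println(bubbled["example.com/app"])
    }
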
} seen[p.ImportPath] = p @@ -803,6 +530,8 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), + forTest: p.ForTest, + module: p.Module, } // Work around https://golang.org/issue/28749: @@ -879,35 +608,49 @@ func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driv } if p.Error != nil { + msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363. + // Address golang.org/issue/35964 by appending import stack to error message. + if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 { + msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack) + } pkg.Errors = append(pkg.Errors, Error{ - Pos: p.Error.Pos, - Msg: strings.TrimSpace(p.Error.Err), // Trim to work around golang.org/issue/32363. + Pos: p.Error.Pos, + Msg: msg, + Kind: ListError, }) } + pkgs[pkg.ID] = pkg + } + + for id, errs := range additionalErrors { + if p, ok := pkgs[id]; ok { + p.Errors = append(p.Errors, errs...) + } + } + for _, pkg := range pkgs { response.Packages = append(response.Packages, pkg) } + sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID }) return &response, nil } // getPkgPath finds the package path of a directory if it's relative to a root directory. -func getPkgPath(cfg *Config, dir string, goInfo func() *goInfo) (string, bool) { +func (state *golistState) getPkgPath(dir string) (string, bool, error) { absDir, err := filepath.Abs(dir) if err != nil { - cfg.Logf("error getting absolute path of %s: %v", dir, err) - return "", false + return "", false, err } - for rdir, rpath := range goInfo().rootDirs { - absRdir, err := filepath.Abs(rdir) - if err != nil { - cfg.Logf("error getting absolute path of %s: %v", rdir, err) - continue - } + roots, err := state.determineRootDirs() + if err != nil { + return "", false, err + } + + for rdir, rpath := range roots { // Make sure that the directory is in the module, // to avoid creating a path relative to another module. - if !strings.HasPrefix(absDir, absRdir) { - cfg.Logf("%s does not have prefix %s", absDir, absRdir) + if !strings.HasPrefix(absDir, rdir) { continue } // TODO(matloob): This doesn't properly handle symlinks. @@ -922,11 +665,11 @@ func getPkgPath(cfg *Config, dir string, goInfo func() *goInfo) (string, bool) { // Once the file is saved, gopls, or the next invocation of the tool will get the correct // result straight from golist. // TODO(matloob): Implement module tiebreaking? - return path.Join(rpath, filepath.ToSlash(r)), true + return path.Join(rpath, filepath.ToSlash(r)), true, nil } - return filepath.ToSlash(r), true + return filepath.ToSlash(r), true, nil } - return "", false + return "", false, nil } // absJoin absolutizes and flattens the lists of files. 
@@ -945,8 +688,8 @@ func absJoin(dir string, fileses ...[]string) (res []string) { func golistargs(cfg *Config, words []string) []string { const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo fullargs := []string{ - "list", "-e", "-json", - fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0), + "-e", "-json", + fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0), fmt.Sprintf("-test=%t", cfg.Tests), fmt.Sprintf("-export=%t", usesExportData(cfg)), fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), @@ -961,25 +704,20 @@ func golistargs(cfg *Config, words []string) []string { } // invokeGo returns the stdout of a go command invocation. -func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { - stdout := new(bytes.Buffer) - stderr := new(bytes.Buffer) - cmd := exec.CommandContext(cfg.Context, "go", args...) - // On darwin the cwd gets resolved to the real path, which breaks anything that - // expects the working directory to keep the original path, including the - // go command when dealing with modules. - // The Go stdlib has a special feature where if the cwd and the PWD are the - // same node then it trusts the PWD, so by setting it in the env for the child - // process we fix up all the paths returned by the go command. - cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir) - cmd.Dir = cfg.Dir - cmd.Stdout = stdout - cmd.Stderr = stderr - defer func(start time.Time) { - cfg.Logf("%s for %v, stderr: <<%s>> stdout: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr, stdout) - }(time.Now()) - - if err := cmd.Run(); err != nil { +func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) { + cfg := state.cfg + + inv := &gocommand.Invocation{ + Verb: verb, + Args: args, + BuildFlags: cfg.BuildFlags, + Env: cfg.Env, + Logf: cfg.Logf, + WorkingDir: cfg.Dir, + } + + stdout, stderr, _, err := inv.RunRaw(cfg.Context) + if err != nil { // Check for 'go' executable not being found. if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) @@ -989,7 +727,7 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { if !ok { // Catastrophic error: // - context cancellation - return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) + return nil, fmt.Errorf("couldn't run 'go': %v", err) } // Old go version? @@ -1016,7 +754,12 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) } if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { - if strings.HasPrefix(strings.TrimLeftFunc(stderr.String()[len("# "):], isPkgPathRune), "\n") { + msg := stderr.String()[len("# "):] + if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") { + return stdout, nil + } + // Treat pkg-config errors as a special case (golang.org/issue/36770). + if strings.HasPrefix(msg, "pkg-config") { return stdout, nil } } @@ -1105,16 +848,6 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) } } - - // As of writing, go list -export prints some non-fatal compilation - // errors to stderr, even with -e set. We would prefer that it put - // them in the Package.Error JSON (see https://golang.org/issue/26319). 
- // In the meantime, there's nowhere good to put them, but they can - // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS - // is set. - if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { - fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr) - } return stdout, nil } diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index a7de62299d6ea..7974a6c9bb61c 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -1,12 +1,13 @@ package packages import ( - "bytes" "encoding/json" "fmt" "go/parser" "go/token" + "os" "path/filepath" + "sort" "strconv" "strings" ) @@ -16,7 +17,7 @@ import ( // sometimes incorrect. // TODO(matloob): Handle unsupported cases, including the following: // - determining the correct package to add given a new import path -func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func() *goInfo) (modifiedPkgs, needPkgs []string, err error) { +func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { havePkgs := make(map[string]string) // importPath -> non-test package ID needPkgsSet := make(map[string]bool) modifiedPkgsSet := make(map[string]bool) @@ -34,7 +35,23 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( // potentially modifying the transitive set of dependencies). var overlayAddsImports bool - for opath, contents := range cfg.Overlay { + // If both a package and its test package are created by the overlay, we + // need the real package first. Process all non-test files before test + // files, and make the whole process deterministic while we're at it. + var overlayFiles []string + for opath := range state.cfg.Overlay { + overlayFiles = append(overlayFiles, opath) + } + sort.Slice(overlayFiles, func(i, j int) bool { + iTest := strings.HasSuffix(overlayFiles[i], "_test.go") + jTest := strings.HasSuffix(overlayFiles[j], "_test.go") + if iTest != jTest { + return !iTest // non-tests are before tests. + } + return overlayFiles[i] < overlayFiles[j] + }) + for _, opath := range overlayFiles { + contents := state.cfg.Overlay[opath] base := filepath.Base(opath) dir := filepath.Dir(opath) var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant @@ -64,14 +81,8 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( testVariantOf = p continue nextPackage } + // We must have already seen the package of which this is a test variant. if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath { - // If we've already seen the test variant, - // make sure to label which package it is a test variant of. - if hasTestFiles(pkg) { - testVariantOf = p - continue nextPackage - } - // If we have already seen the package of which this is a test variant. if hasTestFiles(p) { testVariantOf = pkg } @@ -86,7 +97,10 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func( if pkg == nil { // Try to find the module or gopath dir the file is contained in. // Then for modules, add the module opath to the beginning. 
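The sort.Slice call above is a two-key sort: the boolean key (is this a _test.go file?) dominates, and lexical order breaks ties, which is what makes overlay processing deterministic. The same pattern in isolation:

    package main

    import (
    	"fmt"
    	"sort"
    	"strings"
    )

    func main() {
    	files := []string{"b_test.go", "a.go", "a_test.go", "b.go"}
    	// Two-key sort, as in processGolistOverlay: non-test files first
    	// so real packages exist before their test variants, then
    	// lexical order for determinism.
    	sort.Slice(files, func(i, j int) bool {
    		iTest := strings.HasSuffix(files[i], "_test.go")
    		jTest := strings.HasSuffix(files[j], "_test.go")
    		if iTest != jTest {
    			return !iTest // non-tests sort before tests
    		}
    		return files[i] < files[j]
    	})
    	fmt.Println(files) // [a.go b.go a_test.go b_test.go]
    }
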
-			pkgPath, ok := getPkgPath(cfg, dir, rootDirs)
+			pkgPath, ok, err := state.getPkgPath(dir)
+			if err != nil {
+				return nil, nil, err
+			}
 			if !ok {
 				break
 			}
@@ -114,6 +128,11 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
 			if isTestFile && !isXTest && testVariantOf != nil {
 				pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...)
 				pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...)
+				// Add the package under test and its imports to the test variant.
+				pkg.forTest = testVariantOf.PkgPath
+				for k, v := range testVariantOf.Imports {
+					pkg.Imports[k] = &Package{ID: v.ID}
+				}
 			}
 		}
 	}
@@ -130,42 +149,45 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
 			continue
 		}
 		for _, imp := range imports {
-			_, found := pkg.Imports[imp]
-			if !found {
-				overlayAddsImports = true
-				// TODO(matloob): Handle cases when the following block isn't correct.
-				// These include imports of vendored packages, etc.
-				id, ok := havePkgs[imp]
-				if !ok {
-					id = imp
-				}
-				pkg.Imports[imp] = &Package{ID: id}
-				// Add dependencies to the non-test variant version of this package as wel.
-				if testVariantOf != nil {
-					testVariantOf.Imports[imp] = &Package{ID: id}
+			if _, found := pkg.Imports[imp]; found {
+				continue
+			}
+			overlayAddsImports = true
+			id, ok := havePkgs[imp]
+			if !ok {
+				var err error
+				id, err = state.resolveImport(dir, imp)
+				if err != nil {
+					return nil, nil, err
 				}
 			}
+			pkg.Imports[imp] = &Package{ID: id}
+			// Add dependencies to the non-test variant version of this package as well.
+			if testVariantOf != nil {
+				testVariantOf.Imports[imp] = &Package{ID: id}
+			}
 		}
-		continue
 	}
-	// toPkgPath tries to guess the package path given the id.
-	// This isn't always correct -- it's certainly wrong for
-	// vendored packages' paths.
-	toPkgPath := func(id string) string {
-		// TODO(matloob): Handle vendor paths.
-		i := strings.IndexByte(id, ' ')
-		if i >= 0 {
-			return id[:i]
+	// toPkgPath guesses the package path given the id.
+	toPkgPath := func(sourceDir, id string) (string, error) {
+		if i := strings.IndexByte(id, ' '); i >= 0 {
+			return state.resolveImport(sourceDir, id[:i])
 		}
-		return id
+		return state.resolveImport(sourceDir, id)
 	}
-	// Do another pass now that new packages have been created to determine the
-	// set of missing packages.
+	// Now that new packages have been created, do another pass to determine
+	// the new set of missing packages.
 	for _, pkg := range response.dr.Packages {
 		for _, imp := range pkg.Imports {
-			pkgPath := toPkgPath(imp.ID)
+			if len(pkg.GoFiles) == 0 {
+				return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath)
+			}
+			pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID)
+			if err != nil {
+				return nil, nil, err
+			}
 			if _, ok := havePkgs[pkgPath]; !ok {
 				needPkgsSet[pkgPath] = true
 			}
@@ -185,6 +207,52 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
 	return modifiedPkgs, needPkgs, err
 }
+
+// resolveImport finds the ID of a package given its import path.
+// In particular, it will find the right vendored copy when in GOPATH mode.
+func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) { + env, err := state.getEnv() + if err != nil { + return "", err + } + if env["GOMOD"] != "" { + return importPath, nil + } + + searchDir := sourceDir + for { + vendorDir := filepath.Join(searchDir, "vendor") + exists, ok := state.vendorDirs[vendorDir] + if !ok { + info, err := os.Stat(vendorDir) + exists = err == nil && info.IsDir() + state.vendorDirs[vendorDir] = exists + } + + if exists { + vendoredPath := filepath.Join(vendorDir, importPath) + if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() { + // We should probably check for .go files here, but shame on anyone who fools us. + path, ok, err := state.getPkgPath(vendoredPath) + if err != nil { + return "", err + } + if ok { + return path, nil + } + } + } + + // We know we've hit the top of the filesystem when we Dir / and get /, + // or C:\ and get C:\, etc. + next := filepath.Dir(searchDir) + if next == searchDir { + break + } + searchDir = next + } + return importPath, nil +} + func hasTestFiles(p *Package) bool { for _, f := range p.GoFiles { if strings.HasSuffix(f, "_test.go") { @@ -194,44 +262,59 @@ func hasTestFiles(p *Package) bool { return false } -// determineRootDirs returns a mapping from directories code can be contained in to the -// corresponding import path prefixes of those directories. -// Its result is used to try to determine the import path for a package containing -// an overlay file. -func determineRootDirs(cfg *Config) map[string]string { - // Assume modules first: - out, err := invokeGo(cfg, "list", "-m", "-json", "all") +// determineRootDirs returns a mapping from absolute directories that could +// contain code to their corresponding import path prefixes. +func (state *golistState) determineRootDirs() (map[string]string, error) { + env, err := state.getEnv() if err != nil { - return determineRootDirsGOPATH(cfg) + return nil, err + } + if env["GOMOD"] != "" { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsModules() + }) + } else { + state.rootsOnce.Do(func() { + state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH() + }) + } + return state.rootDirs, state.rootDirsError +} + +func (state *golistState) determineRootDirsModules() (map[string]string, error) { + out, err := state.invokeGo("list", "-m", "-json", "all") + if err != nil { + return nil, err } m := map[string]string{} type jsonMod struct{ Path, Dir string } for dec := json.NewDecoder(out); dec.More(); { mod := new(jsonMod) if err := dec.Decode(mod); err != nil { - return m // Give up and return an empty map. Package won't be found for overlay. + return nil, err } if mod.Dir != "" && mod.Path != "" { // This is a valid module; add it to the map. - m[mod.Dir] = mod.Path + absDir, err := filepath.Abs(mod.Dir) + if err != nil { + return nil, err + } + m[absDir] = mod.Path } } - return m + return m, nil } -func determineRootDirsGOPATH(cfg *Config) map[string]string { +func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) { m := map[string]string{} - out, err := invokeGo(cfg, "env", "GOPATH") - if err != nil { - // Could not determine root dir mapping. Everything is best-effort, so just return an empty map. - // When we try to find the import path for a directory, there will be no root-dir match and - // we'll give up. 
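The loop in resolveImport above stops when filepath.Dir returns its input unchanged, which happens exactly at / (or a drive root such as C:\ on Windows). The same fixed-point walk, extracted into a generic helper; findUp is an invented name, not part of the patch:

    package main

    import (
    	"fmt"
    	"os"
    	"path/filepath"
    )

    // findUp walks from dir toward the filesystem root looking for a
    // subdirectory called name, using the same fixed-point loop that
    // resolveImport uses to probe vendor/ directories: filepath.Dir
    // returning its own input signals the top.
    func findUp(dir, name string) (string, bool) {
    	searchDir := dir
    	for {
    		candidate := filepath.Join(searchDir, name)
    		if info, err := os.Stat(candidate); err == nil && info.IsDir() {
    			return candidate, true
    		}
    		next := filepath.Dir(searchDir)
    		if next == searchDir {
    			return "", false // hit / or C:\ without finding name
    		}
    		searchDir = next
    	}
    }

    func main() {
    	wd, _ := os.Getwd()
    	fmt.Println(findUp(wd, "vendor"))
    }
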
- return m - } - for _, p := range filepath.SplitList(string(bytes.TrimSpace(out.Bytes()))) { - m[filepath.Join(p, "src")] = "" + for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) { + absDir, err := filepath.Abs(dir) + if err != nil { + return nil, err + } + m[filepath.Join(absDir, "src")] = "" } - return m + return m, nil } func extractImports(filename string, contents []byte) ([]string, error) { diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go new file mode 100644 index 0000000000000..aff94a3fe913a --- /dev/null +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -0,0 +1,57 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package packages + +import ( + "fmt" + "strings" +) + +var allModes = []LoadMode{ + NeedName, + NeedFiles, + NeedCompiledGoFiles, + NeedImports, + NeedDeps, + NeedExportsFile, + NeedTypes, + NeedSyntax, + NeedTypesInfo, + NeedTypesSizes, +} + +var modeStrings = []string{ + "NeedName", + "NeedFiles", + "NeedCompiledGoFiles", + "NeedImports", + "NeedDeps", + "NeedExportsFile", + "NeedTypes", + "NeedSyntax", + "NeedTypesInfo", + "NeedTypesSizes", +} + +func (mod LoadMode) String() string { + m := mod + if m == 0 { + return fmt.Sprintf("LoadMode(0)") + } + var out []string + for i, x := range allModes { + if x > m { + break + } + if (m & x) != 0 { + out = append(out, modeStrings[i]) + m = m ^ x + } + } + if m != 0 { + out = append(out, "Unknown") + } + return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|")) +} diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 050cca43a2ba7..1ac6558c1d31e 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -23,6 +23,7 @@ import ( "sync" "golang.org/x/tools/go/gcexportdata" + "golang.org/x/tools/internal/packagesinternal" ) // A LoadMode controls the amount of detail to return when loading. @@ -34,6 +35,9 @@ import ( // Load may return more information than requested. type LoadMode int +// TODO(matloob): When a V2 of go/packages is released, rename NeedExportsFile to +// NeedExportFile to make it consistent with the Package field it's adding. + const ( // NeedName adds Name and PkgPath. NeedName LoadMode = 1 << iota @@ -51,7 +55,7 @@ const ( // NeedDeps adds the fields requested by the LoadMode in the packages in Imports. NeedDeps - // NeedExportsFile adds ExportsFile. + // NeedExportsFile adds ExportFile. NeedExportsFile // NeedTypes adds Types, Fset, and IllTyped. @@ -160,7 +164,7 @@ type Config struct { Tests bool // Overlay provides a mapping of absolute file paths to file contents. - // If the file with the given path already exists, the parser will use the + // If the file with the given path already exists, the parser will use the // alternative file contents provided by the map. // // Overlays provide incomplete support for when a given file doesn't @@ -292,6 +296,21 @@ type Package struct { // TypesSizes provides the effective size function for types in TypesInfo. TypesSizes types.Sizes + + // forTest is the package under test, if any. + forTest string + + // module is the module information for the package if it exists. 
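The String method added in loadmode_string.go above decomposes the bit set: each Need* constant is a distinct power of two, matched bits are appended to the output and cleared with XOR, and any leftover bits print as Unknown. Assuming the vendored go/packages with this method in place, a combined mode prints symbolically:

    package main

    import (
    	"fmt"

    	"golang.org/x/tools/go/packages"
    )

    func main() {
    	// Each Need* constant is a single bit, so ORing them composes a
    	// mode and the new String method decomposes it again for debugging.
    	mode := packages.NeedName | packages.NeedImports | packages.NeedTypesSizes
    	fmt.Println(mode) // LoadMode(NeedName|NeedImports|NeedTypesSizes)
    }
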
+	module *packagesinternal.Module
+}
+
+func init() {
+	packagesinternal.GetForTest = func(p interface{}) string {
+		return p.(*Package).forTest
+	}
+	packagesinternal.GetModule = func(p interface{}) *packagesinternal.Module {
+		return p.(*Package).module
+	}
 }
 
 // An Error describes a problem with a package's metadata, syntax, or types.
@@ -500,12 +519,23 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
 		if i, found := rootMap[pkg.ID]; found {
 			rootIndex = i
 		}
+
+		// Overlays can invalidate export data.
+		// TODO(matloob): make this check fine-grained based on dependencies on overlaid files
+		exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
+		// This package needs type information if the caller requested types and the package is
+		// either a root, or it's a non-root and the user requested dependencies ...
+		needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
+		// This package needs source if the caller requested source (or types info, which implies source)
+		// and the package is either a root, or it's a non-root and the user requested dependencies ...
+		needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
+			// ... or if we need types and the exportData is invalid. We fall back to (incompletely)
+			// typechecking packages from source if they fail to compile.
+			(ld.Mode&NeedTypes|NeedTypesInfo != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe"
 		lpkg := &loaderPackage{
 			Package:   pkg,
-			needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0,
-			needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0 ||
-				len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files
-				pkg.ExportFile == "" && pkg.PkgPath != "unsafe",
+			needtypes: needtypes,
+			needsrc:   needsrc,
 		}
 		ld.pkgs[lpkg.ID] = lpkg
 		if rootIndex >= 0 {
@@ -713,7 +743,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
 	// which would then require that such created packages be explicitly
 	// inserted back into the Import graph as a final step after export data loading.
 	// The Diamond test exercises this case.
-	if !lpkg.needtypes {
+	if !lpkg.needtypes && !lpkg.needsrc {
 		return
 	}
 	if !lpkg.needsrc {
diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
index 7219c8e9ff1f1..9887f7e7a016e 100644
--- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
+++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
@@ -14,14 +14,14 @@ import (
 	"sync"
 )
 
-// TraverseLink is used as a return value from WalkFuncs to indicate that the
+// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the
 // symlink named in the call may be traversed.
-var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
+var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
 
-// SkipFiles is a used as a return value from WalkFuncs to indicate that the
+// ErrSkipFiles is used as a return value from WalkFuncs to indicate that the
 // callback should not be called for any other files in the current directory.
 // Child directories will still be traversed.
-var SkipFiles = errors.New("fastwalk: skip remaining files in directory") +var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory") // Walk is a faster implementation of filepath.Walk. // @@ -167,7 +167,7 @@ func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error { err := w.fn(joined, typ) if typ == os.ModeSymlink { - if err == TraverseLink { + if err == ErrTraverseLink { // Set callbackDone so we don't call it twice for both the // symlink-as-symlink and the symlink-as-directory later: w.enqueue(walkItem{dir: joined, callbackDone: true}) diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go index a906b87595ba0..b0d6327a9e62b 100644 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go @@ -26,7 +26,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e continue } if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil { - if err == SkipFiles { + if err == ErrSkipFiles { skipFiles = true continue } diff --git a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go index 3369b1a0b2de1..5901a8f61608a 100644 --- a/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go +++ b/vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go @@ -66,7 +66,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e continue } if err := fn(dirName, name, typ); err != nil { - if err == SkipFiles { + if err == ErrSkipFiles { skipFiles = true continue } @@ -76,8 +76,9 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e } func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) { - // golang.org/issue/15653 - dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) + // golang.org/issue/37269 + dirent := &syscall.Dirent{} + copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(dirent))[:], buf) if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v { panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v)) } diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go new file mode 100644 index 0000000000000..75d73e744fd58 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -0,0 +1,121 @@ +// Package gocommand is a helper for calling the go command. +package gocommand + +import ( + "bytes" + "context" + "fmt" + "os" + "os/exec" + "strings" + "time" +) + +// An Invocation represents a call to the go command. +type Invocation struct { + Verb string + Args []string + BuildFlags []string + Env []string + WorkingDir string + Logf func(format string, args ...interface{}) +} + +// Run runs the invocation, returning its stdout and an error suitable for +// human consumption, including stderr. +func (i *Invocation) Run(ctx context.Context) (*bytes.Buffer, error) { + stdout, _, friendly, _ := i.RunRaw(ctx) + return stdout, friendly +} + +// RunRaw is like Run, but also returns the raw stderr and error for callers +// that want to do low-level error handling/recovery. 
+func (i *Invocation) RunRaw(ctx context.Context) (stdout *bytes.Buffer, stderr *bytes.Buffer, friendlyError error, rawError error) { + log := i.Logf + if log == nil { + log = func(string, ...interface{}) {} + } + + goArgs := []string{i.Verb} + switch i.Verb { + case "mod": + // mod needs the sub-verb before build flags. + goArgs = append(goArgs, i.Args[0]) + goArgs = append(goArgs, i.BuildFlags...) + goArgs = append(goArgs, i.Args[1:]...) + case "env": + // env doesn't take build flags. + goArgs = append(goArgs, i.Args...) + default: + goArgs = append(goArgs, i.BuildFlags...) + goArgs = append(goArgs, i.Args...) + } + cmd := exec.Command("go", goArgs...) + stdout = &bytes.Buffer{} + stderr = &bytes.Buffer{} + cmd.Stdout = stdout + cmd.Stderr = stderr + // On darwin the cwd gets resolved to the real path, which breaks anything that + // expects the working directory to keep the original path, including the + // go command when dealing with modules. + // The Go stdlib has a special feature where if the cwd and the PWD are the + // same node then it trusts the PWD, so by setting it in the env for the child + // process we fix up all the paths returned by the go command. + cmd.Env = append(append([]string{}, i.Env...), "PWD="+i.WorkingDir) + cmd.Dir = i.WorkingDir + + defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) + + rawError = runCmdContext(ctx, cmd) + friendlyError = rawError + if rawError != nil { + // Check for 'go' executable not being found. + if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound { + friendlyError = fmt.Errorf("go command required, not found: %v", ee) + } + if ctx.Err() != nil { + friendlyError = ctx.Err() + } + friendlyError = fmt.Errorf("err: %v: stderr: %s", rawError, stderr) + } + return +} + +// runCmdContext is like exec.CommandContext except it sends os.Interrupt +// before os.Kill. +func runCmdContext(ctx context.Context, cmd *exec.Cmd) error { + if err := cmd.Start(); err != nil { + return err + } + resChan := make(chan error, 1) + go func() { + resChan <- cmd.Wait() + }() + + select { + case err := <-resChan: + return err + case <-ctx.Done(): + } + // Cancelled. Interrupt and see if it ends voluntarily. + cmd.Process.Signal(os.Interrupt) + select { + case err := <-resChan: + return err + case <-time.After(time.Second): + } + // Didn't shut down in response to interrupt. Kill it hard. + cmd.Process.Kill() + return <-resChan +} + +func cmdDebugStr(cmd *exec.Cmd) string { + env := make(map[string]string) + for _, kv := range cmd.Env { + split := strings.Split(kv, "=") + k, v := split[0], split[1] + env[k] = v + } + + return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args) +} diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 9a61bdbf5ddca..390cb9db795ab 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -23,8 +23,10 @@ import ( // Options controls the behavior of a Walk call. type Options struct { - Debug bool // Enable debug logging - ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules. + // If Logf is non-nil, debug logging is enabled through this function. + Logf func(format string, args ...interface{}) + // Search module caches. Also disables legacy goimports ignore rules. 
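runCmdContext above deliberately avoids exec.CommandContext, which kills the child immediately on cancellation; instead it sends os.Interrupt and escalates to Kill only after a grace period. The same escalation, demonstrated against a long-running command (a sketch that assumes a Unix system, for the sleep binary and interrupt delivery):

    package main

    import (
    	"context"
    	"fmt"
    	"os"
    	"os/exec"
    	"time"
    )

    // runGraceful is the same interrupt-then-kill escalation as
    // runCmdContext: wait for the command or the context, then ask
    // politely before forcing termination.
    func runGraceful(ctx context.Context, cmd *exec.Cmd) error {
    	if err := cmd.Start(); err != nil {
    		return err
    	}
    	res := make(chan error, 1)
    	go func() { res <- cmd.Wait() }()
    	select {
    	case err := <-res:
    		return err
    	case <-ctx.Done():
    	}
    	cmd.Process.Signal(os.Interrupt) // polite: let it clean up
    	select {
    	case err := <-res:
    		return err
    	case <-time.After(time.Second):
    	}
    	cmd.Process.Kill() // firm: it ignored the interrupt
    	return <-res
    }

    func main() {
    	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
    	defer cancel()
    	err := runGraceful(ctx, exec.Command("sleep", "10"))
    	fmt.Println(err) // "signal: interrupt" once the context expires
    }
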
+ ModulesEnabled bool } // RootType indicates the type of a Root. @@ -77,16 +79,17 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root } } +// walkDir creates a walker and starts fastwalk with this walker. func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) { if _, err := os.Stat(root.Path); os.IsNotExist(err) { - if opts.Debug { - log.Printf("skipping nonexistent directory: %v", root.Path) + if opts.Logf != nil { + opts.Logf("skipping nonexistent directory: %v", root.Path) } return } start := time.Now() - if opts.Debug { - log.Printf("gopathwalk: scanning %s", root.Path) + if opts.Logf != nil { + opts.Logf("gopathwalk: scanning %s", root.Path) } w := &walker{ root: root, @@ -99,8 +102,8 @@ func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err) } - if opts.Debug { - log.Printf("gopathwalk: scanned %s in %v", root.Path, time.Since(start)) + if opts.Logf != nil { + opts.Logf("gopathwalk: scanned %s in %v", root.Path, time.Since(start)) } } @@ -114,7 +117,7 @@ type walker struct { ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files. } -// init initializes the walker based on its Options. +// init initializes the walker based on its Options func (w *walker) init() { var ignoredPaths []string if w.root.Type == RootModuleCache { @@ -129,11 +132,11 @@ func (w *walker) init() { full := filepath.Join(w.root.Path, p) if fi, err := os.Stat(full); err == nil { w.ignoredDirs = append(w.ignoredDirs, fi) - if w.opts.Debug { - log.Printf("Directory added to ignore list: %s", full) + if w.opts.Logf != nil { + w.opts.Logf("Directory added to ignore list: %s", full) } - } else if w.opts.Debug { - log.Printf("Error statting ignored directory: %v", err) + } else if w.opts.Logf != nil { + w.opts.Logf("Error statting ignored directory: %v", err) } } } @@ -144,11 +147,11 @@ func (w *walker) init() { func (w *walker) getIgnoredDirs(path string) []string { file := filepath.Join(path, ".goimportsignore") slurp, err := ioutil.ReadFile(file) - if w.opts.Debug { + if w.opts.Logf != nil { if err != nil { - log.Print(err) + w.opts.Logf("%v", err) } else { - log.Printf("Read %s", file) + w.opts.Logf("Read %s", file) } } if err != nil { @@ -167,6 +170,7 @@ func (w *walker) getIgnoredDirs(path string) []string { return ignoredDirs } +// shouldSkipDir reports whether the file should be skipped or not. func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { for _, ignoredDir := range w.ignoredDirs { if os.SameFile(fi, ignoredDir) { @@ -180,20 +184,21 @@ func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool { return false } +// walk walks through the given path. func (w *walker) walk(path string, typ os.FileMode) error { dir := filepath.Dir(path) if typ.IsRegular() { if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) { // Doesn't make sense to have regular files // directly in your $GOPATH/src or $GOROOT/src. 
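The Options change above replaces a Debug flag plus package-level log calls with a single optional Logf field: nil disables logging entirely and a non-nil function chooses the sink. In miniature:

    package main

    import "log"

    // Options mirrors gopathwalk's move from a Debug bool to an
    // optional Logf func: nil means silent, non-nil enables logging.
    type Options struct {
    	Logf func(format string, args ...interface{})
    }

    func scan(opts Options) {
    	if opts.Logf != nil {
    		opts.Logf("scanning %s", "/tmp")
    	}
    	// ... actual work ...
    }

    func main() {
    	scan(Options{})                 // silent
    	scan(Options{Logf: log.Printf}) // logs to stderr
    }
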
- return fastwalk.SkipFiles + return fastwalk.ErrSkipFiles } if !strings.HasSuffix(path, ".go") { return nil } w.add(w.root, dir) - return fastwalk.SkipFiles + return fastwalk.ErrSkipFiles } if typ == os.ModeDir { base := filepath.Base(path) @@ -221,7 +226,7 @@ func (w *walker) walk(path string, typ os.FileMode) error { return nil } if w.shouldTraverse(dir, fi) { - return fastwalk.TraverseLink + return fastwalk.ErrTraverseLink } } return nil diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index cdaa57b9bde4a..92a23439ff657 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -14,7 +14,6 @@ import ( "go/token" "io/ioutil" "os" - "os/exec" "path" "path/filepath" "reflect" @@ -22,12 +21,11 @@ import ( "strconv" "strings" "sync" - "time" "unicode" "unicode/utf8" "golang.org/x/tools/go/ast/astutil" - "golang.org/x/tools/go/packages" + "golang.org/x/tools/internal/gocommand" "golang.org/x/tools/internal/gopathwalk" ) @@ -82,7 +80,8 @@ type ImportFix struct { // IdentName is the identifier that this fix will add or remove. IdentName string // FixType is the type of fix this is (AddImport, DeleteImport, SetImportName). - FixType ImportFixType + FixType ImportFixType + Relevance int // see pkg } // An ImportInfo represents a single import statement. @@ -263,7 +262,7 @@ type pass struct { // loadPackageNames saves the package names for everything referenced by imports. func (p *pass) loadPackageNames(imports []*ImportInfo) error { - if p.env.Debug { + if p.env.Logf != nil { p.env.Logf("loading package names for %v packages", len(imports)) defer func() { p.env.Logf("done loading package names for %v packages", len(imports)) @@ -302,7 +301,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string { if known != nil && known.name != "" { return known.name } - return importPathToAssumedName(imp.ImportPath) + return ImportPathToAssumedName(imp.ImportPath) } // load reads in everything necessary to run a pass, and reports whether the @@ -335,7 +334,7 @@ func (p *pass) load() ([]*ImportFix, bool) { if p.loadRealPackageNames { err := p.loadPackageNames(append(imports, p.candidates...)) if err != nil { - if p.env.Debug { + if p.env.Logf != nil { p.env.Logf("loading package names: %v", err) } return nil, false @@ -435,7 +434,7 @@ func (p *pass) importSpecName(imp *ImportInfo) string { } ident := p.importIdentifier(imp) - if ident == importPathToAssumedName(imp.ImportPath) { + if ident == ImportPathToAssumedName(imp.ImportPath) { return "" // ident not needed since the assumed and real names are the same. } return ident @@ -529,7 +528,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv return nil, err } srcDir := filepath.Dir(abs) - if env.Debug { + if env.Logf != nil { env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir) } @@ -537,7 +536,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv // derive package names from import paths, see if the file is already // complete. We can't add any imports yet, because we don't know // if missing references are actually package vars. 
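
The gopathwalk hunks above replace the Debug boolean with an optional Logf function: a nil function means silence, and any non-nil function receives the debug output. A minimal sketch of that pattern, using an illustrative Options type rather than the vendored one:

	package main

	import "log"

	type Options struct {
		// If Logf is non-nil, debug logging is enabled through this function.
		Logf func(format string, args ...interface{})
	}

	func scan(opts Options, root string) {
		if opts.Logf != nil {
			opts.Logf("scanning %s", root)
		}
		// ... walk the tree ...
	}

	func main() {
		scan(Options{}, "/tmp")                 // silent
		scan(Options{Logf: log.Printf}, "/tmp") // logged
	}
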
- p := &pass{fset: fset, f: f, srcDir: srcDir} + p := &pass{fset: fset, f: f, srcDir: srcDir, env: env} if fixes, done := p.load(); done { return fixes, nil } @@ -559,8 +558,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv } // Third pass: get real package names where we had previously used - // the naive algorithm. This is the first step that will use the - // environment, so we provide it here for the first time. + // the naive algorithm. p = &pass{fset: fset, f: f, srcDir: srcDir, env: env} p.loadRealPackageNames = true p.otherFiles = otherFiles @@ -585,89 +583,127 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv return fixes, nil } -// getCandidatePkgs returns the list of pkgs that are accessible from filename, -// optionall filtered to only packages named pkgName. -func getCandidatePkgs(pkgName, filename string, env *ProcessEnv) ([]*pkg, error) { - // TODO(heschi): filter out current package. (Don't forget x_test can import x.) +// Highest relevance, used for the standard library. Chosen arbitrarily to +// match pre-existing gopls code. +const MaxRelevance = 7 - var result []*pkg +// getCandidatePkgs works with the passed callback to find all acceptable packages. +// It deduplicates by import path, and uses a cached stdlib rather than reading +// from disk. +func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filename, filePkg string, env *ProcessEnv) error { + notSelf := func(p *pkg) bool { + return p.packageName != filePkg || p.dir != filepath.Dir(filename) + } // Start off with the standard library. - for importPath := range stdlib { - if pkgName != "" && path.Base(importPath) != pkgName { - continue - } - result = append(result, &pkg{ + for importPath, exports := range stdlib { + p := &pkg{ dir: filepath.Join(env.GOROOT, "src", importPath), importPathShort: importPath, packageName: path.Base(importPath), - relevance: 0, - }) - } - - // Exclude goroot results -- getting them is relatively expensive, not cached, - // and generally redundant with the in-memory version. - exclude := []gopathwalk.RootType{gopathwalk.RootGOROOT} - // Only the go/packages resolver uses the first argument, and nobody uses that resolver. - scannedPkgs, err := env.GetResolver().scan(nil, true, exclude) - if err != nil { - return nil, err + relevance: MaxRelevance, + } + if notSelf(p) && wrappedCallback.packageNameLoaded(p) { + wrappedCallback.exportsLoaded(p, exports) + } } + var mu sync.Mutex dupCheck := map[string]struct{}{} - for _, pkg := range scannedPkgs { - if pkgName != "" && pkg.packageName != pkgName { - continue - } - if !canUse(filename, pkg.dir) { - continue - } - if _, ok := dupCheck[pkg.importPathShort]; ok { - continue - } - dupCheck[pkg.importPathShort] = struct{}{} - result = append(result, pkg) + + scanFilter := &scanCallback{ + rootFound: func(root gopathwalk.Root) bool { + // Exclude goroot results -- getting them is relatively expensive, not cached, + // and generally redundant with the in-memory version. 
+ return root.Type != gopathwalk.RootGOROOT && wrappedCallback.rootFound(root) + }, + dirFound: wrappedCallback.dirFound, + packageNameLoaded: func(pkg *pkg) bool { + mu.Lock() + defer mu.Unlock() + if _, ok := dupCheck[pkg.importPathShort]; ok { + return false + } + dupCheck[pkg.importPathShort] = struct{}{} + return notSelf(pkg) && wrappedCallback.packageNameLoaded(pkg) + }, + exportsLoaded: func(pkg *pkg, exports []string) { + // If we're an x_test, load the package under test's test variant. + if strings.HasSuffix(filePkg, "_test") && pkg.dir == filepath.Dir(filename) { + var err error + _, exports, err = loadExportsFromFiles(ctx, env, pkg.dir, true) + if err != nil { + return + } + } + wrappedCallback.exportsLoaded(pkg, exports) + }, } + return env.GetResolver().scan(ctx, scanFilter) +} - // Sort first by relevance, then by package name, with import path as a tiebreaker. - sort.Slice(result, func(i, j int) bool { - pi, pj := result[i], result[j] - if pi.relevance != pj.relevance { - return pi.relevance < pj.relevance - } - if pi.packageName != pj.packageName { - return pi.packageName < pj.packageName - } - return pi.importPathShort < pj.importPathShort - }) +func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) map[string]int { + result := make(map[string]int) + for _, path := range paths { + result[path] = env.GetResolver().scoreImportPath(ctx, path) + } + return result +} - return result, nil +func PrimeCache(ctx context.Context, env *ProcessEnv) error { + // Fully scan the disk for directories, but don't actually read any Go files. + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + return false + }, + packageNameLoaded: func(pkg *pkg) bool { + return false + }, + } + return getCandidatePkgs(ctx, callback, "", "", env) } func candidateImportName(pkg *pkg) string { - if importPathToAssumedName(pkg.importPathShort) != pkg.packageName { + if ImportPathToAssumedName(pkg.importPathShort) != pkg.packageName { return pkg.packageName } return "" } // getAllCandidates gets all of the candidates to be imported, regardless of if they are needed. -func getAllCandidates(filename string, env *ProcessEnv) ([]ImportFix, error) { - pkgs, err := getCandidatePkgs("", filename, env) - if err != nil { - return nil, err - } - result := make([]ImportFix, 0, len(pkgs)) - for _, pkg := range pkgs { - result = append(result, ImportFix{ - StmtInfo: ImportInfo{ - ImportPath: pkg.importPathShort, - Name: candidateImportName(pkg), - }, - IdentName: pkg.packageName, - FixType: AddImport, - }) +func getAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + if !canUse(filename, pkg.dir) { + return false + } + // Try the assumed package name first, then a simpler path match + // in case of packages named vN, which are not uncommon. 
+ return strings.HasPrefix(ImportPathToAssumedName(pkg.importPathShort), searchPrefix) || + strings.HasPrefix(path.Base(pkg.importPathShort), searchPrefix) + }, + packageNameLoaded: func(pkg *pkg) bool { + if !strings.HasPrefix(pkg.packageName, searchPrefix) { + return false + } + wrapped(ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }) + return false + }, } - return result, nil + return getCandidatePkgs(ctx, callback, filename, filePkg, env) } // A PackageExport is a package and its exports. @@ -676,64 +712,61 @@ type PackageExport struct { Exports []string } -func getPackageExports(completePackage, filename string, env *ProcessEnv) ([]PackageExport, error) { - pkgs, err := getCandidatePkgs(completePackage, filename, env) - if err != nil { - return nil, err - } - - results := make([]PackageExport, 0, len(pkgs)) - for _, pkg := range pkgs { - fix := &ImportFix{ - StmtInfo: ImportInfo{ - ImportPath: pkg.importPathShort, - Name: candidateImportName(pkg), - }, - IdentName: pkg.packageName, - FixType: AddImport, - } - var exports []string - if e, ok := stdlib[pkg.importPathShort]; ok { - exports = e - } else { - exports, err = loadExportsForPackage(context.Background(), env, completePackage, pkg) - if err != nil { - if env.Debug { - env.Logf("while completing %q, error loading exports from %q: %v", completePackage, pkg.importPathShort, err) - } - continue - } - } - sort.Strings(exports) - results = append(results, PackageExport{ - Fix: fix, - Exports: exports, - }) +func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error { + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, references{searchPkg: nil}, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + return pkg.packageName == searchPkg + }, + exportsLoaded: func(pkg *pkg, exports []string) { + sort.Strings(exports) + wrapped(PackageExport{ + Fix: &ImportFix{ + StmtInfo: ImportInfo{ + ImportPath: pkg.importPathShort, + Name: candidateImportName(pkg), + }, + IdentName: pkg.packageName, + FixType: AddImport, + Relevance: pkg.relevance, + }, + Exports: exports, + }) + }, } - - return results, nil + return getCandidatePkgs(ctx, callback, filename, filePkg, env) } // ProcessEnv contains environment variables and settings that affect the use of // the go command, the go/build package, etc. type ProcessEnv struct { LocalPrefix string - Debug bool + + BuildFlags []string // If non-empty, these will be used instead of the // process-wide values. GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string WorkingDir string - // If true, use go/packages regardless of the environment. - ForceGoPackages bool - - // Logf is the default logger for the ProcessEnv. + // If Logf is non-nil, debug logging is enabled through this function. Logf func(format string, args ...interface{}) resolver Resolver } +// CopyConfig copies the env's configuration into a new env. 
+func (e *ProcessEnv) CopyConfig() *ProcessEnv { + copy := *e + copy.resolver = nil + return &copy +} + func (e *ProcessEnv) env() []string { env := os.Environ() add := func(k, v string) { @@ -757,73 +790,55 @@ func (e *ProcessEnv) GetResolver() Resolver { if e.resolver != nil { return e.resolver } - if e.ForceGoPackages { - e.resolver = &goPackagesResolver{env: e} - return e.resolver - } - - out, err := e.invokeGo("env", "GOMOD") + out, err := e.invokeGo(context.TODO(), "env", "GOMOD") if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 { - e.resolver = &gopathResolver{env: e} + e.resolver = newGopathResolver(e) return e.resolver } - e.resolver = &ModuleResolver{env: e} + e.resolver = newModuleResolver(e) return e.resolver } -func (e *ProcessEnv) newPackagesConfig(mode packages.LoadMode) *packages.Config { - return &packages.Config{ - Mode: mode, - Dir: e.WorkingDir, - Env: e.env(), - } -} - func (e *ProcessEnv) buildContext() *build.Context { ctx := build.Default ctx.GOROOT = e.GOROOT ctx.GOPATH = e.GOPATH - // As of Go 1.14, build.Context has a WorkingDir field + // As of Go 1.14, build.Context has a Dir field // (see golang.org/issue/34860). // Populate it only if present. - if wd := reflect.ValueOf(&ctx).Elem().FieldByName("WorkingDir"); wd.IsValid() && wd.Kind() == reflect.String { - wd.SetString(e.WorkingDir) + rc := reflect.ValueOf(&ctx).Elem() + dir := rc.FieldByName("Dir") + if !dir.IsValid() { + // Working drafts of Go 1.14 named the field "WorkingDir" instead. + // TODO(bcmills): Remove this case after the Go 1.14 beta has been released. + dir = rc.FieldByName("WorkingDir") } - return &ctx -} - -func (e *ProcessEnv) invokeGo(args ...string) (*bytes.Buffer, error) { - cmd := exec.Command("go", args...) - stdout := &bytes.Buffer{} - stderr := &bytes.Buffer{} - cmd.Stdout = stdout - cmd.Stderr = stderr - cmd.Env = e.env() - cmd.Dir = e.WorkingDir - - if e.Debug { - defer func(start time.Time) { e.Logf("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now()) + if dir.IsValid() && dir.Kind() == reflect.String { + dir.SetString(e.WorkingDir) } - if err := cmd.Run(); err != nil { - return nil, fmt.Errorf("running go: %v (stderr:\n%s)", err, stderr) - } - return stdout, nil + + return &ctx } -func cmdDebugStr(cmd *exec.Cmd) string { - env := make(map[string]string) - for _, kv := range cmd.Env { - split := strings.Split(kv, "=") - k, v := split[0], split[1] - env[k] = v + func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) (*bytes.Buffer, error) { + inv := gocommand.Invocation{ + Verb: verb, + Args: args, + BuildFlags: e.BuildFlags, + Env: e.env(), + Logf: e.Logf, + WorkingDir: e.WorkingDir, } - - return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args) + return inv.Run(ctx) } func addStdlibCandidates(pass *pass, refs references) { add := func(pkg string) { + // Prevent self-imports. + if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.GOROOT, "src", pkg) == pass.srcDir { + return + } exports := copyExports(stdlib[pkg]) pass.addCandidate( &ImportInfo{ImportPath: pkg}, @@ -848,94 +863,65 @@ type Resolver interface { // loadPackageNames loads the package names in importPaths. loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) - // scan finds (at least) the packages satisfying refs. 
If loadNames is true, - // package names will be set on the results, and dirs whose package name - // could not be determined will be excluded. - scan(refs references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) + // scan works with callback to search for packages. See scanCallback for details. + scan(ctx context.Context, callback *scanCallback) error // loadExports returns the set of exported symbols in the package at dir. // loadExports may be called concurrently. - loadExports(ctx context.Context, pkg *pkg) (string, []string, error) + loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) + // scoreImportPath returns the relevance for an import path. + scoreImportPath(ctx context.Context, path string) int ClearForNewScan() } -// gopackagesResolver implements resolver for GOPATH and module workspaces using go/packages. -type goPackagesResolver struct { - env *ProcessEnv +// A scanCallback controls a call to scan and receives its results. +// In general, minor errors will be silently discarded; a user should not +// expect to receive a full series of calls for everything. +type scanCallback struct { + // rootFound is called before scanning a new root dir. If it returns true, + // the root will be scanned. Returning false will not necessarily prevent + // directories from that root making it to dirFound. + rootFound func(gopathwalk.Root) bool + // dirFound is called when a directory is found that is possibly a Go package. + // pkg will be populated with everything except packageName. + // If it returns true, the package's name will be loaded. + dirFound func(pkg *pkg) bool + // packageNameLoaded is called when a package is found and its name is loaded. + // If it returns true, the package's exports will be loaded. + packageNameLoaded func(pkg *pkg) bool + // exportsLoaded is called when a package's exports have been loaded. + exportsLoaded func(pkg *pkg, exports []string) } -func (r *goPackagesResolver) ClearForNewScan() {} - -func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { - if len(importPaths) == 0 { - return nil, nil - } - cfg := r.env.newPackagesConfig(packages.LoadFiles) - pkgs, err := packages.Load(cfg, importPaths...) - if err != nil { - return nil, err - } - names := map[string]string{} - for _, pkg := range pkgs { - names[VendorlessPath(pkg.PkgPath)] = pkg.Name - } - // We may not have found all the packages. Guess the rest. - for _, path := range importPaths { - if _, ok := names[path]; ok { - continue - } - names[path] = importPathToAssumedName(path) - } - return names, nil - -} - -func (r *goPackagesResolver) scan(refs references, _ bool, _ []gopathwalk.RootType) ([]*pkg, error) { - var loadQueries []string - for pkgName := range refs { - loadQueries = append(loadQueries, "iamashamedtousethedisabledqueryname="+pkgName) - } - sort.Strings(loadQueries) - cfg := r.env.newPackagesConfig(packages.LoadFiles) - goPackages, err := packages.Load(cfg, loadQueries...) 
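
For context on the GetResolver change above: resolver selection now hinges on `go env GOMOD`, which prints the path of the active go.mod, or nothing when modules are off. A small standalone probe of the same check, stdlib only, with illustrative output strings:

	package main

	import (
		"bytes"
		"fmt"
		"os/exec"
	)

	func main() {
		out, err := exec.Command("go", "env", "GOMOD").Output()
		if err != nil || len(bytes.TrimSpace(out)) == 0 {
			fmt.Println("GOPATH mode") // newGopathResolver in the diff
			return
		}
		fmt.Println("module mode, go.mod at", string(bytes.TrimSpace(out)))
	}
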
- if err != nil { - return nil, err - } - - var scan []*pkg - for _, goPackage := range goPackages { - scan = append(scan, &pkg{ - dir: filepath.Dir(goPackage.CompiledGoFiles[0]), - importPathShort: VendorlessPath(goPackage.PkgPath), - goPackage: goPackage, - packageName: goPackage.Name, - }) - } - return scan, nil -} - -func (r *goPackagesResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) { - if pkg.goPackage == nil { - return "", nil, fmt.Errorf("goPackage not set") - } - var exports []string - fset := token.NewFileSet() - for _, fname := range pkg.goPackage.CompiledGoFiles { - f, err := parser.ParseFile(fset, fname, nil, 0) - if err != nil { - return "", nil, fmt.Errorf("parsing %s: %v", fname, err) - } - for name := range f.Scope.Objects { - if ast.IsExported(name) { - exports = append(exports, name) +func addExternalCandidates(pass *pass, refs references, filename string) error { + var mu sync.Mutex + found := make(map[string][]pkgDistance) + callback := &scanCallback{ + rootFound: func(gopathwalk.Root) bool { + return true // We want everything. + }, + dirFound: func(pkg *pkg) bool { + return pkgIsCandidate(filename, refs, pkg) + }, + packageNameLoaded: func(pkg *pkg) bool { + if _, want := refs[pkg.packageName]; !want { + return false } - } + if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName { + // The candidate is in the same directory and has the + // same package name. Don't try to import ourselves. + return false + } + if !canUse(filename, pkg.dir) { + return false + } + mu.Lock() + defer mu.Unlock() + found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)}) + return false // We'll do our own loading after we sort. + }, } - return pkg.goPackage.Name, exports, nil -} - -func addExternalCandidates(pass *pass, refs references, filename string) error { - dirScan, err := pass.env.GetResolver().scan(refs, false, nil) + err := pass.env.GetResolver().scan(context.Background(), callback) if err != nil { return err } @@ -962,7 +948,7 @@ func addExternalCandidates(pass *pass, refs references, filename string) error { go func(pkgName string, symbols map[string]bool) { defer wg.Done() - found, err := findImport(ctx, pass, dirScan, pkgName, symbols, filename) + found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename) if err != nil { firstErrOnce.Do(func() { @@ -1006,7 +992,7 @@ func notIdentifier(ch rune) bool { ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch))) } -// importPathToAssumedName returns the assumed package name of an import path. +// ImportPathToAssumedName returns the assumed package name of an import path. // It does this using only string parsing of the import path. // It picks the last element of the path that does not look like a major // version, and then picks the valid identifier off the start of that element. @@ -1014,7 +1000,7 @@ func notIdentifier(ch rune) bool { // clarity. // This function could be moved to a standard package and exported if we want // for use in other tools. -func importPathToAssumedName(importPath string) string { +func ImportPathToAssumedName(importPath string) string { base := path.Base(importPath) if strings.HasPrefix(base, "v") { if _, err := strconv.Atoi(base[1:]); err == nil { @@ -1033,24 +1019,36 @@ func importPathToAssumedName(importPath string) string { // gopathResolver implements resolver for GOPATH workspaces. 
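
The scanCallback type introduced above defines a staged protocol: dirFound gates the cheap directory hit, packageNameLoaded gates reading the package clause, and exportsLoaded receives the parsed exports. Each stage returns whether to pay for the next, more expensive one. A toy model of that control flow, with stand-in types rather than the vendored ones:

	package main

	import "fmt"

	type pkg struct{ dir, name string }

	type scanCallback struct {
		dirFound          func(p *pkg) bool
		packageNameLoaded func(p *pkg) bool
		exportsLoaded     func(p *pkg, exports []string)
	}

	func scan(cb *scanCallback, found []*pkg) {
		for _, p := range found {
			if !cb.dirFound(p) {
				continue // caller doesn't want this dir; skip name loading
			}
			p.name = "demo" // stand-in for reading the package clause
			if !cb.packageNameLoaded(p) {
				continue // caller doesn't want exports; skip parsing files
			}
			cb.exportsLoaded(p, []string{"New", "Do"})
		}
	}

	func main() {
		scan(&scanCallback{
			dirFound:          func(p *pkg) bool { return true },
			packageNameLoaded: func(p *pkg) bool { return p.name == "demo" },
			exportsLoaded:     func(p *pkg, ex []string) { fmt.Println(p.dir, ex) },
		}, []*pkg{{dir: "/src/demo"}})
	}
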
type gopathResolver struct { - env *ProcessEnv - cache *dirInfoCache + env *ProcessEnv + walked bool + cache *dirInfoCache + scanSema chan struct{} // scanSema prevents concurrent scans. } -func (r *gopathResolver) init() { - if r.cache == nil { - r.cache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, - } +func newGopathResolver(env *ProcessEnv) *gopathResolver { + r := &gopathResolver{ + env: env, + cache: &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + }, + scanSema: make(chan struct{}, 1), } + r.scanSema <- struct{}{} + return r } func (r *gopathResolver) ClearForNewScan() { - r.cache = nil + <-r.scanSema + r.cache = &dirInfoCache{ + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, + } + r.walked = false + r.scanSema <- struct{}{} } func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { - r.init() names := map[string]string{} for _, path := range importPaths { names[path] = importPathToName(r.env, path, srcDir) @@ -1130,7 +1128,6 @@ func packageDirToName(dir string) (packageName string, err error) { } type pkg struct { - goPackage *packages.Package dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http") importPathShort string // vendorless import path ("net/http", "a/b") packageName string // package name loaded from source if requested @@ -1178,8 +1175,7 @@ func distance(basepath, targetpath string) int { return strings.Count(p, string(filepath.Separator)) + 1 } -func (r *gopathResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) { - r.init() +func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error { add := func(root gopathwalk.Root, dir string) { // We assume cached directories have not changed. We can skip them and their // children. @@ -1196,56 +1192,84 @@ func (r *gopathResolver) scan(_ references, loadNames bool, exclude []gopathwalk } r.cache.Store(dir, info) } - roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), exclude) - gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false}) - var result []*pkg - for _, dir := range r.cache.Keys() { - info, ok := r.cache.Load(dir) - if !ok { - continue - } - if loadNames { - var err error - info, err = r.cache.CachePackageName(info) - if err != nil { - continue - } + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return } p := &pkg{ importPathShort: info.nonCanonicalImportPath, - dir: dir, - relevance: 1, - packageName: info.packageName, + dir: info.dir, + relevance: MaxRelevance - 1, } if info.rootType == gopathwalk.RootGOROOT { - p.relevance = 0 + p.relevance = MaxRelevance + } + + if !callback.dirFound(p) { + return } - result = append(result, p) + var err error + p.packageName, err = r.cache.CachePackageName(info) + if err != nil { + return + } + + if !callback.packageNameLoaded(p) { + return + } + if _, exports, err := r.loadExports(ctx, p, false); err == nil { + callback.exportsLoaded(p, exports) + } + } + stop := r.cache.ScanAndListen(ctx, processDir) + defer stop() + // The callback is not necessarily safe to use in the goroutine below. Process roots eagerly. 
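
newGopathResolver above seeds scanSema with one token; receiving the token acquires exclusive scan rights and sending it back releases them. Unlike a sync.Mutex, the channel can sit in a select next to ctx.Done(). A self-contained sketch of the idiom, with illustrative names:

	package main

	import (
		"context"
		"fmt"
	)

	type resolver struct{ scanSema chan struct{} }

	func newResolver() *resolver {
		r := &resolver{scanSema: make(chan struct{}, 1)}
		r.scanSema <- struct{}{} // start with the token available
		return r
	}

	func (r *resolver) scan(ctx context.Context) error {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-r.scanSema: // acquire
		}
		defer func() { r.scanSema <- struct{}{} }() // release
		fmt.Println("scanning with exclusive access")
		return nil
	}

	func main() {
		r := newResolver()
		fmt.Println(r.scan(context.Background()))
	}
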
+ roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. + scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: + } + defer func() { r.scanSema <- struct{}{} }() + gopathwalk.Walk(roots, add, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: false}) + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: } - return result, nil + return nil } -func filterRoots(roots []gopathwalk.Root, exclude []gopathwalk.RootType) []gopathwalk.Root { +func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) int { + if _, ok := stdlib[path]; ok { + return MaxRelevance + } + return MaxRelevance - 1 +} + +func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) []gopathwalk.Root { var result []gopathwalk.Root -outer: for _, root := range roots { - for _, i := range exclude { - if i == root.Type { - continue outer - } + if !include(root) { + continue } result = append(result, root) } return result } -func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) { - r.init() - if info, ok := r.cache.Load(pkg.dir); ok { +func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { + if info, ok := r.cache.Load(pkg.dir); ok && !includeTest { return r.cache.CacheExports(ctx, r.env, info) } - return loadExportsFromFiles(ctx, r.env, pkg.dir) + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) } // VendorlessPath returns the devendorized version of the import path ipath. @@ -1261,7 +1285,7 @@ func VendorlessPath(ipath string) string { return ipath } -func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (string, []string, error) { +func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) { var exports []string // Look for non-test, buildable .go files which could provide exports. @@ -1272,7 +1296,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str var files []os.FileInfo for _, fi := range all { name := fi.Name() - if !strings.HasSuffix(name, ".go") || strings.HasSuffix(name, "_test.go") { + if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) { continue } match, err := env.buildContext().MatchFile(dir, fi.Name()) @@ -1305,6 +1329,10 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str // handled by MatchFile above. continue } + if includeTest && strings.HasSuffix(f.Name.Name, "_test") { + // x_test package. We want internal test files only. + continue + } pkgName = f.Name.Name for name := range f.Scope.Objects { if ast.IsExported(name) { @@ -1313,7 +1341,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str } } - if env.Debug { + if env.Logf != nil { sortedExports := append([]string(nil), exports...) sort.Strings(sortedExports) env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, strings.Join(sortedExports, ", ")) @@ -1323,42 +1351,19 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str // findImport searches for a package with the given symbols. 
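
gopathResolver.scan above cannot abort a walk midway without leaving the cache unusable, so cancellation merely detaches the waiter while the walk runs to completion in its own goroutine. The same pattern in isolation; the durations are arbitrary demo values:

	package main

	import (
		"context"
		"fmt"
		"time"
	)

	func scan(ctx context.Context, walk func()) {
		done := make(chan struct{})
		go func() {
			walk() // must finish even if the caller gives up
			close(done)
		}()
		select {
		case <-ctx.Done():
			fmt.Println("caller detached; walk continues in background")
		case <-done:
			fmt.Println("walk finished before cancellation")
		}
	}

	func main() {
		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
		defer cancel()
		scan(ctx, func() { time.Sleep(50 * time.Millisecond) })
		time.Sleep(100 * time.Millisecond) // let the detached walk end (demo only)
	}
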
// If no package is found, findImport returns ("", false, nil) -func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { - pkgDir, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - pkgDir = filepath.Dir(pkgDir) - - // Find candidate packages, looking only at their directory names first. - var candidates []pkgDistance - for _, pkg := range dirScan { - if pkg.dir == pkgDir && pass.f.Name.Name == pkgName { - // The candidate is in the same directory and has the - // same package name. Don't try to import ourselves. - continue - } - if pkgIsCandidate(filename, pkgName, pkg) { - candidates = append(candidates, pkgDistance{ - pkg: pkg, - distance: distance(pkgDir, pkg.dir), - }) - } - } - +func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) { // Sort the candidates by their import package length, // assuming that shorter package names are better than long // ones. Note that this sorts by the de-vendored name, so // there's no "penalty" for vendoring. sort.Sort(byDistanceOrImportPathShortLength(candidates)) - if pass.env.Debug { + if pass.env.Logf != nil { for i, c := range candidates { pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir) } } // Collect exports for packages with matching names. - rescv := make([]chan *pkg, len(candidates)) for i := range candidates { rescv[i] = make(chan *pkg, 1) @@ -1390,12 +1395,14 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, wg.Done() }() - if pass.env.Debug { + if pass.env.Logf != nil { pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName) } - exports, err := loadExportsForPackage(ctx, pass.env, pkgName, c.pkg) + // If we're an x_test, load the package under test's test variant. + includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir + _, exports, err := pass.env.GetResolver().loadExports(ctx, c.pkg, includeTest) if err != nil { - if pass.env.Debug { + if pass.env.Logf != nil { pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err) } resc <- nil @@ -1430,17 +1437,6 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, return nil, nil } -func loadExportsForPackage(ctx context.Context, env *ProcessEnv, expectPkg string, pkg *pkg) ([]string, error) { - pkgName, exports, err := env.GetResolver().loadExports(ctx, pkg) - if err != nil { - return nil, err - } - if expectPkg != pkgName { - return nil, fmt.Errorf("dir %v is package %v, wanted %v", pkg.dir, pkgName, expectPkg) - } - return exports, err -} - // pkgIsCandidate reports whether pkg is a candidate for satisfying the // finding which package pkgIdent in the file named by filename is trying // to refer to. @@ -1453,7 +1449,7 @@ func loadExportsForPackage(ctx context.Context, env *ProcessEnv, expectPkg strin // filename is the file being formatted. 
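
findImport above loads exports concurrently but allocates one buffered channel per candidate and drains them in rank order, so the first acceptable candidate is deterministic even though loads race. A compressed model of that fan-out; the candidate names and the "only the second one matches" rule are invented for the demo:

	package main

	import "fmt"

	func main() {
		// Candidates arrive already sorted best-first (distance, then path length).
		candidates := []string{"example.com/a/http", "example.com/b/http", "other/http"}
		rescv := make([]chan string, len(candidates))
		for i := range candidates {
			rescv[i] = make(chan string, 1)
			go func(i int) {
				// Stand-in for loadExports: pretend only the second candidate
				// actually exports the symbols we need.
				if i == 1 {
					rescv[i] <- candidates[i]
				} else {
					rescv[i] <- "" // no match
				}
			}(i)
		}
		// Consume strictly in rank order for a deterministic result.
		for _, ch := range rescv {
			if res := <-ch; res != "" {
				fmt.Println("chose", res)
				break
			}
		}
	}
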
// pkgIdent is the package being searched for, like "client" (if // searching for "client.New") -func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool { +func pkgIsCandidate(filename string, refs references, pkg *pkg) bool { // Check "internal" and "vendor" visibility: if !canUse(filename, pkg.dir) { return false @@ -1471,17 +1467,18 @@ func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool { // "bar", which is strongly discouraged // anyway. There's no reason goimports needs // to be slow just to accommodate that. - lastTwo := lastTwoComponents(pkg.importPathShort) - if strings.Contains(lastTwo, pkgIdent) { - return true - } - if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { - lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + for pkgIdent := range refs { + lastTwo := lastTwoComponents(pkg.importPathShort) if strings.Contains(lastTwo, pkgIdent) { return true } + if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) { + lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) + if strings.Contains(lastTwo, pkgIdent) { + return true + } + } } - return false } diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index ed3867bb59402..b18daea2905d5 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -11,6 +11,7 @@ package imports import ( "bufio" "bytes" + "context" "fmt" "go/ast" "go/build" @@ -20,7 +21,7 @@ import ( "go/token" "io" "io/ioutil" - "log" + "os" "regexp" "strconv" "strings" @@ -83,42 +84,54 @@ func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, return getFixes(fileSet, file, filename, opt.Env) } -// ApplyFix will apply all of the fixes to the file and format it. -func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options) (formatted []byte, err error) { +// ApplyFixes applies all of the fixes to the file and formats it. extraMode +// is added in when parsing the file. +func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) { src, opt, err = initialize(filename, src, opt) if err != nil { return nil, err } + // Don't use parse() -- we don't care about fragments or statement lists + // here, and we need to work with unparseable files. fileSet := token.NewFileSet() - file, adjust, err := parse(fileSet, filename, src, opt) - if err != nil { + parserMode := parser.Mode(0) + if opt.Comments { + parserMode |= parser.ParseComments + } + if opt.AllErrors { + parserMode |= parser.AllErrors + } + parserMode |= extraMode + + file, err := parser.ParseFile(fileSet, filename, src, parserMode) + if file == nil { return nil, err } // Apply the fixes to the file. apply(fileSet, file, fixes) - return formatFile(fileSet, file, src, adjust, opt) + return formatFile(fileSet, file, src, nil, opt) } -// GetAllCandidates gets all of the standard library candidate packages to import in -// sorted order on import path. -func GetAllCandidates(filename string, opt *Options) (pkgs []ImportFix, err error) { - _, opt, err = initialize(filename, nil, opt) +// GetAllCandidates gets all of the packages starting with prefix that can be +// imported by filename, sorted by import path. 
+func GetAllCandidates(ctx context.Context, callback func(ImportFix), searchPrefix, filename, filePkg string, opt *Options) error { + _, opt, err := initialize(filename, []byte{}, opt) if err != nil { - return nil, err + return err } - return getAllCandidates(filename, opt.Env) + return getAllCandidates(ctx, callback, searchPrefix, filename, filePkg, opt.Env) } // GetPackageExports returns all known packages with name pkg and their exports. -func GetPackageExports(pkg, filename string, opt *Options) (exports []PackageExport, err error) { - _, opt, err = initialize(filename, nil, opt) +func GetPackageExports(ctx context.Context, callback func(PackageExport), searchPkg, filename, filePkg string, opt *Options) error { + _, opt, err := initialize(filename, []byte{}, opt) if err != nil { - return nil, err + return err } - return getPackageExports(pkg, filename, opt.Env) + return getPackageExports(ctx, callback, searchPkg, filename, filePkg, opt.Env) } // initialize sets the values for opt and src. @@ -133,16 +146,14 @@ func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, er // Set the env if the user has not provided it. if opt.Env == nil { opt.Env = &ProcessEnv{ - GOPATH: build.Default.GOPATH, - GOROOT: build.Default.GOROOT, + GOPATH: build.Default.GOPATH, + GOROOT: build.Default.GOROOT, + GOFLAGS: os.Getenv("GOFLAGS"), + GO111MODULE: os.Getenv("GO111MODULE"), + GOPROXY: os.Getenv("GOPROXY"), + GOSUMDB: os.Getenv("GOSUMDB"), } } - - // Set the logger if the user has not provided it. - if opt.Env.Logf == nil { - opt.Env.Logf = log.Printf - } - if src == nil { b, err := ioutil.ReadFile(filename) if err != nil { diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 0f9b87eb7331e..69e3eecc4c7fd 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -13,11 +13,10 @@ import ( "sort" "strconv" "strings" - "sync" + "golang.org/x/mod/module" + "golang.org/x/mod/semver" "golang.org/x/tools/internal/gopathwalk" - "golang.org/x/tools/internal/module" - "golang.org/x/tools/internal/semver" ) // ModuleResolver implements resolver for modules using the go command as little @@ -26,11 +25,14 @@ type ModuleResolver struct { env *ProcessEnv moduleCacheDir string dummyVendorMod *ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory. + roots []gopathwalk.Root + scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots. + scannedRoots map[gopathwalk.Root]bool - Initialized bool - Main *ModuleJSON - ModsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path... - ModsByDir []*ModuleJSON // ...or Dir. + initialized bool + main *ModuleJSON + modsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path... + modsByDir []*ModuleJSON // ...or Dir. // moduleCacheCache stores information about the module cache. moduleCacheCache *dirInfoCache @@ -41,13 +43,23 @@ type ModuleJSON struct { Path string // module path Replace *ModuleJSON // replaced by this module Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? 
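
The imports.go hunks above move GetAllCandidates and GetPackageExports from slice-returning functions to context-aware streaming callbacks. A sketch of what that shape buys the caller, using simplified stand-in signatures rather than the vendored ones:

	package main

	import (
		"context"
		"fmt"
		"strings"
	)

	type ImportFix struct {
		Path      string
		Relevance int
	}

	func getAllCandidates(ctx context.Context, wrapped func(ImportFix), prefix string) error {
		for _, p := range []string{"bufio", "bytes", "database/sql"} {
			if ctx.Err() != nil {
				return ctx.Err() // the caller can abandon the search at any time
			}
			if !strings.HasPrefix(p, prefix) {
				continue
			}
			wrapped(ImportFix{Path: p, Relevance: 7}) // stream, don't accumulate
		}
		return nil
	}

	func main() {
		_ = getAllCandidates(context.Background(), func(f ImportFix) {
			fmt.Println(f.Path, f.Relevance)
		}, "b")
	}
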
Dir string // directory holding files for this module, if any GoMod string // path to go.mod file for this module, if any GoVersion string // go version used in module } +func newModuleResolver(e *ProcessEnv) *ModuleResolver { + r := &ModuleResolver{ + env: e, + scanSema: make(chan struct{}, 1), + } + r.scanSema <- struct{}{} + return r +} + func (r *ModuleResolver) init() error { - if r.Initialized { + if r.initialized { return nil } mainMod, vendorEnabled, err := vendorEnabled(r.env) @@ -58,13 +70,13 @@ func (r *ModuleResolver) init() error { if mainMod != nil && vendorEnabled { // Vendor mode is on, so all the non-Main modules are irrelevant, // and we need to search /vendor for everything. - r.Main = mainMod + r.main = mainMod r.dummyVendorMod = &ModuleJSON{ Path: "", Dir: filepath.Join(mainMod.Dir, "vendor"), } - r.ModsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod} - r.ModsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod} + r.modsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod} } else { // Vendor mode is off, so run go list -m ... to find everything. r.initAllMods() @@ -72,35 +84,69 @@ func (r *ModuleResolver) init() error { r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod") - sort.Slice(r.ModsByModPath, func(i, j int) bool { + sort.Slice(r.modsByModPath, func(i, j int) bool { count := func(x int) int { - return strings.Count(r.ModsByModPath[x].Path, "/") + return strings.Count(r.modsByModPath[x].Path, "/") } return count(j) < count(i) // descending order }) - sort.Slice(r.ModsByDir, func(i, j int) bool { + sort.Slice(r.modsByDir, func(i, j int) bool { count := func(x int) int { - return strings.Count(r.ModsByDir[x].Dir, "/") + return strings.Count(r.modsByDir[x].Dir, "/") } return count(j) < count(i) // descending order }) + r.roots = []gopathwalk.Root{ + {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT}, + } + if r.main != nil { + r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule}) + } + if vendorEnabled { + r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther}) + } else { + addDep := func(mod *ModuleJSON) { + if mod.Replace == nil { + // This is redundant with the cache, but we'll skip it cheaply enough. + r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache}) + } else { + r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther}) + } + } + // Walk dependent modules before scanning the full mod cache, direct deps first. 
+ for _, mod := range r.modsByModPath { + if !mod.Indirect && !mod.Main { + addDep(mod) + } + } + for _, mod := range r.modsByModPath { + if mod.Indirect && !mod.Main { + addDep(mod) + } + } + r.roots = append(r.roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache}) + } + + r.scannedRoots = map[gopathwalk.Root]bool{} if r.moduleCacheCache == nil { r.moduleCacheCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, } } if r.otherCache == nil { r.otherCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, } } - r.Initialized = true + r.initialized = true return nil } func (r *ModuleResolver) initAllMods() error { - stdout, err := r.env.invokeGo("list", "-m", "-json", "...") + stdout, err := r.env.invokeGo(context.TODO(), "list", "-m", "-json", "...") if err != nil { return err } @@ -110,33 +156,43 @@ func (r *ModuleResolver) initAllMods() error { return err } if mod.Dir == "" { - if r.env.Debug { + if r.env.Logf != nil { r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path) } // Can't do anything with a module that's not downloaded. continue } - r.ModsByModPath = append(r.ModsByModPath, mod) - r.ModsByDir = append(r.ModsByDir, mod) + // golang/go#36193: the go command doesn't always clean paths. + mod.Dir = filepath.Clean(mod.Dir) + r.modsByModPath = append(r.modsByModPath, mod) + r.modsByDir = append(r.modsByDir, mod) if mod.Main { - r.Main = mod + r.main = mod } } return nil } func (r *ModuleResolver) ClearForNewScan() { + <-r.scanSema + r.scannedRoots = map[gopathwalk.Root]bool{} r.otherCache = &dirInfoCache{ - dirs: map[string]*directoryPackageInfo{}, + dirs: map[string]*directoryPackageInfo{}, + listeners: map[*int]cacheListener{}, } + r.scanSema <- struct{}{} } func (r *ModuleResolver) ClearForNewMod() { - env := r.env + <-r.scanSema *r = ModuleResolver{ - env: env, + env: r.env, + moduleCacheCache: r.moduleCacheCache, + otherCache: r.otherCache, + scanSema: r.scanSema, } r.init() + r.scanSema <- struct{}{} } // findPackage returns the module and directory that contains the package at @@ -144,7 +200,7 @@ func (r *ModuleResolver) ClearForNewMod() { func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) { // This can't find packages in the stdlib, but that's harmless for all // the existing code paths. - for _, m := range r.ModsByModPath { + for _, m := range r.modsByModPath { if !strings.HasPrefix(importPath, m.Path) { continue } @@ -211,7 +267,7 @@ func (r *ModuleResolver) cacheKeys() []string { } // cachePackageName caches the package name for a dir already in the cache. -func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) { +func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) { if info.rootType == gopathwalk.RootModuleCache { return r.moduleCacheCache.CachePackageName(info) } @@ -238,7 +294,7 @@ func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON { // - in /vendor/ in -mod=vendor mode. // - nested module? Dunno. // Rumor has it that replace targets cannot contain other replace targets. 
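
The init code above walks roots in relevance order: GOROOT, the main module, direct dependencies, indirect dependencies, and only then the full module cache, so the most likely hits are discovered and cached first. A sketch of that partitioning; the module names are placeholders:

	package main

	import "fmt"

	type mod struct {
		path     string
		indirect bool
	}

	func main() {
		mods := []mod{{"a.com/direct", false}, {"b.com/indirect", true}}
		roots := []string{"$GOROOT/src", "$MAIN_MODULE"}
		for _, m := range mods { // direct dependencies first
			if !m.indirect {
				roots = append(roots, m.path)
			}
		}
		for _, m := range mods { // then indirect ones
			if m.indirect {
				roots = append(roots, m.path)
			}
		}
		roots = append(roots, "$GOPATH/pkg/mod") // the huge cache comes last
		fmt.Println(roots)
	}
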
- for _, m := range r.ModsByDir { + for _, m := range r.modsByDir { if !strings.HasPrefix(dir, m.Dir) { continue } @@ -333,41 +389,49 @@ func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) ( return names, nil } -func (r *ModuleResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) { +func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error { if err := r.init(); err != nil { - return nil, err + return err } - // Walk GOROOT, GOPATH/pkg/mod, and the main module. - roots := []gopathwalk.Root{ - {filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT}, - } - if r.Main != nil { - roots = append(roots, gopathwalk.Root{r.Main.Dir, gopathwalk.RootCurrentModule}) - } - if r.dummyVendorMod != nil { - roots = append(roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther}) - } else { - roots = append(roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache}) - // Walk replace targets, just in case they're not in any of the above. - for _, mod := range r.ModsByModPath { - if mod.Replace != nil { - roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther}) - } + processDir := func(info directoryPackageInfo) { + // Skip this directory if we were not able to get the package information successfully. + if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { + return + } + pkg, err := r.canonicalize(info) + if err != nil { + return + } + + if !callback.dirFound(pkg) { + return + } + pkg.packageName, err = r.cachePackageName(info) + if err != nil { + return } - } - roots = filterRoots(roots, exclude) + if !callback.packageNameLoaded(pkg) { + return + } + _, exports, err := r.loadExports(ctx, pkg, false) + if err != nil { + return + } + callback.exportsLoaded(pkg, exports) + } - var result []*pkg - var mu sync.Mutex + // Start processing everything in the cache, and listen for the new stuff + // we discover in the walk below. + stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir) + defer stop1() + stop2 := r.otherCache.ScanAndListen(ctx, processDir) + defer stop2() - // We assume cached directories have not changed. We can skip them and their - // children. + // We assume cached directories are fully cached, including all their + // children, and have not changed. We can skip them. skip := func(root gopathwalk.Root, dir string) bool { - mu.Lock() - defer mu.Unlock() - info, ok := r.cacheLoad(dir) if !ok { return false @@ -379,44 +443,64 @@ func (r *ModuleResolver) scan(_ references, loadNames bool, exclude []gopathwalk return packageScanned } - // Add anything new to the cache. We'll process everything in it below. + // Add anything new to the cache, and process it if we're still listening. add := func(root gopathwalk.Root, dir string) { - mu.Lock() - defer mu.Unlock() - r.cacheStore(r.scanDirForPackage(root, dir)) } - gopathwalk.WalkSkip(roots, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true}) - - // Everything we already had, and everything new, is now in the cache. - for _, dir := range r.cacheKeys() { - info, ok := r.cacheLoad(dir) - if !ok { - continue - } - - // Skip this directory if we were not able to get the package information successfully. - if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { - continue - } + // r.roots and the callback are not necessarily safe to use in the + // goroutine below. Process them eagerly. 
+ roots := filterRoots(r.roots, callback.rootFound) + // We can't cancel walks, because we need them to finish to have a usable + // cache. Instead, run them in a separate goroutine and detach. + scanDone := make(chan struct{}) + go func() { + select { + case <-ctx.Done(): + return + case <-r.scanSema: + } + defer func() { r.scanSema <- struct{}{} }() + // We have the lock on r.scannedRoots, and no other scans can run. + for _, root := range roots { + if ctx.Err() != nil { + return + } - // If we want package names, make sure the cache has them. - if loadNames { - var err error - if info, err = r.cachePackageName(info); err != nil { + if r.scannedRoots[root] { continue } + gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: true}) + r.scannedRoots[root] = true } + close(scanDone) + }() + select { + case <-ctx.Done(): + case <-scanDone: + } + return nil +} - res, err := r.canonicalize(info) - if err != nil { - continue - } - result = append(result, res) +func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int { + if _, ok := stdlib[path]; ok { + return MaxRelevance } + mod, _ := r.findPackage(path) + return modRelevance(mod) +} - return result, nil +func modRelevance(mod *ModuleJSON) int { + switch { + case mod == nil: // out of scope + return MaxRelevance - 4 + case mod.Indirect: + return MaxRelevance - 3 + case !mod.Main: + return MaxRelevance - 2 + default: + return MaxRelevance - 1 // main module ties with stdlib + } } // canonicalize gets the result of canonicalizing the packages using the results @@ -428,15 +512,14 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { importPathShort: info.nonCanonicalImportPath, dir: info.dir, packageName: path.Base(info.nonCanonicalImportPath), - relevance: 0, + relevance: MaxRelevance, }, nil } importPath := info.nonCanonicalImportPath - relevance := 2 + mod := r.findModuleByDir(info.dir) // Check if the directory is underneath a module that's in scope. - if mod := r.findModuleByDir(info.dir); mod != nil { - relevance = 1 + if mod != nil { // It is. If dir is the target of a replace directive, // our guessed import path is wrong. Use the real one. if mod.Dir == info.dir { @@ -445,15 +528,16 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { dirInMod := info.dir[len(mod.Dir)+len("/"):] importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod)) } - } else if info.needsReplace { + } else if !strings.HasPrefix(importPath, info.moduleName) { + // The module's name doesn't match the package's import path. It + // probably needs a replace directive we don't have. return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir) } res := &pkg{ importPathShort: importPath, dir: info.dir, - packageName: info.packageName, // may not be populated if the caller didn't ask for it - relevance: relevance, + relevance: modRelevance(mod), } // We may have discovered a package that has a different version // in scope already. Canonicalize to that one if possible. 
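
modRelevance above maps a module's standing to a score under MaxRelevance = 7. Reproducing the function from the diff with a few worked values; the ModuleJSON stand-in keeps only the two fields the switch reads:

	package main

	import "fmt"

	const MaxRelevance = 7

	type ModuleJSON struct{ Main, Indirect bool }

	func modRelevance(mod *ModuleJSON) int {
		switch {
		case mod == nil: // out of scope
			return MaxRelevance - 4
		case mod.Indirect:
			return MaxRelevance - 3
		case !mod.Main:
			return MaxRelevance - 2
		default:
			return MaxRelevance - 1
		}
	}

	func main() {
		fmt.Println(modRelevance(nil))                         // 3: out of scope
		fmt.Println(modRelevance(&ModuleJSON{Indirect: true})) // 4: indirect dep
		fmt.Println(modRelevance(&ModuleJSON{}))               // 5: direct dep
		fmt.Println(modRelevance(&ModuleJSON{Main: true}))     // 6: main module
	}
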
@@ -463,14 +547,14 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) { return res, nil } -func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) { +func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) { if err := r.init(); err != nil { return "", nil, err } - if info, ok := r.cacheLoad(pkg.dir); ok { + if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest { return r.cacheExports(ctx, r.env, info) } - return loadExportsFromFiles(ctx, r.env, pkg.dir) + return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest) } func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo { @@ -488,7 +572,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir } switch root.Type { case gopathwalk.RootCurrentModule: - importPath = path.Join(r.Main.Path, filepath.ToSlash(subdir)) + importPath = path.Join(r.main.Path, filepath.ToSlash(subdir)) case gopathwalk.RootModuleCache: matches := modCacheRegexp.FindStringSubmatch(subdir) if len(matches) == 0 { @@ -497,9 +581,9 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir err: fmt.Errorf("invalid module cache path: %v", subdir), } } - modPath, err := module.DecodePath(filepath.ToSlash(matches[1])) + modPath, err := module.UnescapePath(filepath.ToSlash(matches[1])) if err != nil { - if r.env.Debug { + if r.env.Logf != nil { r.env.Logf("decoding module cache path %q: %v", subdir, err) } return directoryPackageInfo{ @@ -516,7 +600,6 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir dir: dir, rootType: root.Type, nonCanonicalImportPath: importPath, - needsReplace: false, moduleDir: modDir, moduleName: modName, } @@ -524,14 +607,6 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir // stdlib packages are always in scope, despite the confusing go.mod return result } - // Check that this package is not obviously impossible to import. - if !strings.HasPrefix(importPath, modName) { - // The module's declared path does not match - // its expected path. It probably needs a - // replace directive we don't have. - result.needsReplace = true - } - return result } @@ -624,7 +699,7 @@ func getMainModuleAnd114(env *ProcessEnv) (*ModuleJSON, bool, error) { {{.GoVersion}} {{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}} ` - stdout, err := env.invokeGo("list", "-m", "-f", format) + stdout, err := env.invokeGo(context.TODO(), "list", "-m", "-f", format) if err != nil { return nil, false, nil } diff --git a/vendor/golang.org/x/tools/internal/imports/mod_cache.go b/vendor/golang.org/x/tools/internal/imports/mod_cache.go index f6b070a3f6eda..5b4f03accddc2 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod_cache.go +++ b/vendor/golang.org/x/tools/internal/imports/mod_cache.go @@ -49,10 +49,6 @@ type directoryPackageInfo struct { // nonCanonicalImportPath is the package's expected import path. It may // not actually be importable at that path. nonCanonicalImportPath string - // needsReplace is true if the nonCanonicalImportPath does not match the - // module's declared path, making it impossible to import without a - // replace directive. - needsReplace bool // Module-related information. moduleDir string // The directory that is the module root of this dir. 
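
scanDirForPackage above now decodes module-cache directory names with golang.org/x/mod's module.UnescapePath, the exported successor of the internal DecodePath. Assuming golang.org/x/mod is available on the module path, the canonical example:

	package main

	import (
		"fmt"

		"golang.org/x/mod/module"
	)

	func main() {
		// Module-cache directories escape uppercase letters with '!', e.g.
		// $GOPATH/pkg/mod/github.com/!burnt!sushi/toml@v0.3.1.
		p, err := module.UnescapePath("github.com/!burnt!sushi/toml")
		fmt.Println(p, err) // github.com/BurntSushi/toml <nil>
	}
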
@@ -97,15 +93,86 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) ( type dirInfoCache struct { mu sync.Mutex // dirs stores information about packages in directories, keyed by absolute path. - dirs map[string]*directoryPackageInfo + dirs map[string]*directoryPackageInfo + listeners map[*int]cacheListener +} + +type cacheListener func(directoryPackageInfo) + +// ScanAndListen calls listener on all the items in the cache, and on anything +// newly added. The returned stop function waits for all in-flight callbacks to +// finish and blocks new ones. +func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() { + ctx, cancel := context.WithCancel(ctx) + + // Flushing out all the callbacks is tricky without knowing how many there + // are going to be. Setting an arbitrary limit makes it much easier. + const maxInFlight = 10 + sema := make(chan struct{}, maxInFlight) + for i := 0; i < maxInFlight; i++ { + sema <- struct{}{} + } + + cookie := new(int) // A unique ID we can use for the listener. + + // We can't hold mu while calling the listener. + d.mu.Lock() + var keys []string + for key := range d.dirs { + keys = append(keys, key) + } + d.listeners[cookie] = func(info directoryPackageInfo) { + select { + case <-ctx.Done(): + return + case <-sema: + } + listener(info) + sema <- struct{}{} + } + d.mu.Unlock() + + stop := func() { + cancel() + d.mu.Lock() + delete(d.listeners, cookie) + d.mu.Unlock() + for i := 0; i < maxInFlight; i++ { + <-sema + } + } + + // Process the pre-existing keys. + for _, k := range keys { + select { + case <-ctx.Done(): + return stop + default: + } + if v, ok := d.Load(k); ok { + listener(v) + } + } + + return stop } // Store stores the package info for dir. func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) { d.mu.Lock() - defer d.mu.Unlock() - stored := info // defensive copy - d.dirs[dir] = &stored + _, old := d.dirs[dir] + d.dirs[dir] = &info + var listeners []cacheListener + for _, l := range d.listeners { + listeners = append(listeners, l) + } + d.mu.Unlock() + + if !old { + for _, l := range listeners { + l(info) + } + } } // Load returns a copy of the directoryPackageInfo for absolute directory dir. 
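
ScanAndListen above replays the existing cache to a listener and keeps it subscribed for future Stores, while Store announces only first-time keys. A simplified model that keeps the replay-then-subscribe contract but drops the context plumbing and the in-flight semaphore of the vendored version:

	package main

	import (
		"fmt"
		"sync"
	)

	type cache struct {
		mu        sync.Mutex
		dirs      map[string]string
		listeners map[*int]func(string)
	}

	func (c *cache) ScanAndListen(listener func(string)) (stop func()) {
		cookie := new(int) // unique key for this subscription
		c.mu.Lock()
		var existing []string
		for _, v := range c.dirs {
			existing = append(existing, v)
		}
		c.listeners[cookie] = listener
		c.mu.Unlock()
		for _, v := range existing { // replay what was already cached
			listener(v)
		}
		return func() {
			c.mu.Lock()
			delete(c.listeners, cookie)
			c.mu.Unlock()
		}
	}

	func (c *cache) Store(dir, info string) {
		c.mu.Lock()
		_, old := c.dirs[dir]
		c.dirs[dir] = info
		ls := make([]func(string), 0, len(c.listeners))
		for _, l := range c.listeners {
			ls = append(ls, l)
		}
		c.mu.Unlock()
		if !old { // only brand-new entries are announced, as in the diff
			for _, l := range ls {
				l(info)
			}
		}
	}

	func main() {
		c := &cache{dirs: map[string]string{"/a": "pkg a"}, listeners: map[*int]func(string){}}
		stop := c.ScanAndListen(func(info string) { fmt.Println("saw:", info) })
		c.Store("/b", "pkg b")
		stop()
		c.Store("/c", "pkg c") // no listener anymore
	}
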
@@ -129,17 +196,17 @@ func (d *dirInfoCache) Keys() (keys []string) { return keys } -func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) { +func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) { if loaded, err := info.reachedStatus(nameLoaded); loaded { - return info, err + return info.packageName, err } if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil { - return info, fmt.Errorf("cannot read package name, scan error: %v", err) + return "", fmt.Errorf("cannot read package name, scan error: %v", err) } info.packageName, info.err = packageDirToName(info.dir) info.status = nameLoaded d.Store(info.dir, info) - return info, info.err + return info.packageName, info.err } func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) { @@ -149,8 +216,8 @@ func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info d if reached, err := info.reachedStatus(nameLoaded); reached && err != nil { return "", nil, err } - info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir) - if info.err == context.Canceled { + info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false) + if info.err == context.Canceled || info.err == context.DeadlineExceeded { return info.packageName, info.exports, info.err } // The cache structure wants things to proceed linearly. We can skip a diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go new file mode 100644 index 0000000000000..b13ce33a39f05 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -0,0 +1,27 @@ +// Package packagesinternal exposes internal-only fields from go/packages. +package packagesinternal + +import "time" + +// Fields must match go list; +type Module struct { + Path string // module path + Version string // module version + Versions []string // available module versions (with -versions) + Replace *Module // replaced by this module + Time *time.Time // time version was created + Update *Module // available update, if any (with -u) + Main bool // is this the main module? + Indirect bool // is this module only an indirect dependency of main module? + Dir string // directory holding files for this module, if any + GoMod string // path to go.mod file used when loading this module, if any + GoVersion string // go version used in module + Error *ModuleError // error loading module +} +type ModuleError struct { + Err string // the error itself +} + +var GetForTest = func(p interface{}) string { return "" } + +var GetModule = func(p interface{}) *Module { return nil } diff --git a/vendor/golang.org/x/tools/internal/span/parse.go b/vendor/golang.org/x/tools/internal/span/parse.go deleted file mode 100644 index b3f268a38aea7..0000000000000 --- a/vendor/golang.org/x/tools/internal/span/parse.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "strconv" - "strings" - "unicode/utf8" -) - -// Parse returns the location represented by the input. -// All inputs are valid locations, as they can always be a pure filename. -// The returned span will be normalized, and thus if printed may produce a -// different string. 
-func Parse(input string) Span { - // :0:0#0-0:0#0 - valid := input - var hold, offset int - hadCol := false - suf := rstripSuffix(input) - if suf.sep == "#" { - offset = suf.num - suf = rstripSuffix(suf.remains) - } - if suf.sep == ":" { - valid = suf.remains - hold = suf.num - hadCol = true - suf = rstripSuffix(suf.remains) - } - switch { - case suf.sep == ":": - return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), Point{}) - case suf.sep == "-": - // we have a span, fall out of the case to continue - default: - // separator not valid, rewind to either the : or the start - return New(NewURI(valid), NewPoint(hold, 0, offset), Point{}) - } - // only the span form can get here - // at this point we still don't know what the numbers we have mean - // if have not yet seen a : then we might have either a line or a column depending - // on whether start has a column or not - // we build an end point and will fix it later if needed - end := NewPoint(suf.num, hold, offset) - hold, offset = 0, 0 - suf = rstripSuffix(suf.remains) - if suf.sep == "#" { - offset = suf.num - suf = rstripSuffix(suf.remains) - } - if suf.sep != ":" { - // turns out we don't have a span after all, rewind - return New(NewURI(valid), end, Point{}) - } - valid = suf.remains - hold = suf.num - suf = rstripSuffix(suf.remains) - if suf.sep != ":" { - // line#offset only - return New(NewURI(valid), NewPoint(hold, 0, offset), end) - } - // we have a column, so if end only had one number, it is also the column - if !hadCol { - end = NewPoint(suf.num, end.v.Line, end.v.Offset) - } - return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), end) -} - -type suffix struct { - remains string - sep string - num int -} - -func rstripSuffix(input string) suffix { - if len(input) == 0 { - return suffix{"", "", -1} - } - remains := input - num := -1 - // first see if we have a number at the end - last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' }) - if last >= 0 && last < len(remains)-1 { - number, err := strconv.ParseInt(remains[last+1:], 10, 64) - if err == nil { - num = int(number) - remains = remains[:last+1] - } - } - // now see if we have a trailing separator - r, w := utf8.DecodeLastRuneInString(remains) - if r != ':' && r != '#' && r == '#' { - return suffix{input, "", -1} - } - remains = remains[:len(remains)-w] - return suffix{remains, string(r), num} -} diff --git a/vendor/golang.org/x/tools/internal/span/span.go b/vendor/golang.org/x/tools/internal/span/span.go deleted file mode 100644 index 4d2ad0986670b..0000000000000 --- a/vendor/golang.org/x/tools/internal/span/span.go +++ /dev/null @@ -1,285 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package span contains support for representing with positions and ranges in -// text files. -package span - -import ( - "encoding/json" - "fmt" - "path" -) - -// Span represents a source code range in standardized form. -type Span struct { - v span -} - -// Point represents a single point within a file. -// In general this should only be used as part of a Span, as on its own it -// does not carry enough information. 
-type Point struct { - v point -} - -type span struct { - URI URI `json:"uri"` - Start point `json:"start"` - End point `json:"end"` -} - -type point struct { - Line int `json:"line"` - Column int `json:"column"` - Offset int `json:"offset"` -} - -// Invalid is a span that reports false from IsValid -var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}} - -var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}} - -// Converter is the interface to an object that can convert between line:column -// and offset forms for a single file. -type Converter interface { - //ToPosition converts from an offset to a line:column pair. - ToPosition(offset int) (int, int, error) - //ToOffset converts from a line:column pair to an offset. - ToOffset(line, col int) (int, error) -} - -func New(uri URI, start Point, end Point) Span { - s := Span{v: span{URI: uri, Start: start.v, End: end.v}} - s.v.clean() - return s -} - -func NewPoint(line, col, offset int) Point { - p := Point{v: point{Line: line, Column: col, Offset: offset}} - p.v.clean() - return p -} - -func Compare(a, b Span) int { - if r := CompareURI(a.URI(), b.URI()); r != 0 { - return r - } - if r := comparePoint(a.v.Start, b.v.Start); r != 0 { - return r - } - return comparePoint(a.v.End, b.v.End) -} - -func ComparePoint(a, b Point) int { - return comparePoint(a.v, b.v) -} - -func comparePoint(a, b point) int { - if !a.hasPosition() { - if a.Offset < b.Offset { - return -1 - } - if a.Offset > b.Offset { - return 1 - } - return 0 - } - if a.Line < b.Line { - return -1 - } - if a.Line > b.Line { - return 1 - } - if a.Column < b.Column { - return -1 - } - if a.Column > b.Column { - return 1 - } - return 0 -} - -func (s Span) HasPosition() bool { return s.v.Start.hasPosition() } -func (s Span) HasOffset() bool { return s.v.Start.hasOffset() } -func (s Span) IsValid() bool { return s.v.Start.isValid() } -func (s Span) IsPoint() bool { return s.v.Start == s.v.End } -func (s Span) URI() URI { return s.v.URI } -func (s Span) Start() Point { return Point{s.v.Start} } -func (s Span) End() Point { return Point{s.v.End} } -func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) } -func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) } - -func (p Point) HasPosition() bool { return p.v.hasPosition() } -func (p Point) HasOffset() bool { return p.v.hasOffset() } -func (p Point) IsValid() bool { return p.v.isValid() } -func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) } -func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) } -func (p Point) Line() int { - if !p.v.hasPosition() { - panic(fmt.Errorf("position not set in %v", p.v)) - } - return p.v.Line -} -func (p Point) Column() int { - if !p.v.hasPosition() { - panic(fmt.Errorf("position not set in %v", p.v)) - } - return p.v.Column -} -func (p Point) Offset() int { - if !p.v.hasOffset() { - panic(fmt.Errorf("offset not set in %v", p.v)) - } - return p.v.Offset -} - -func (p point) hasPosition() bool { return p.Line > 0 } -func (p point) hasOffset() bool { return p.Offset >= 0 } -func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() } -func (p point) isZero() bool { - return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0) -} - -func (s *span) clean() { - //this presumes the points are already clean - if !s.End.isValid() || (s.End == point{}) { - s.End = s.Start - } -} - -func (p *point) clean() { - if p.Line < 0 { - p.Line = 0 - } - if p.Column <= 0 { - if 
p.Line > 0 { - p.Column = 1 - } else { - p.Column = 0 - } - } - if p.Offset == 0 && (p.Line > 1 || p.Column > 1) { - p.Offset = -1 - } -} - -// Format implements fmt.Formatter to print the Location in a standard form. -// The format produced is one that can be read back in using Parse. -func (s Span) Format(f fmt.State, c rune) { - fullForm := f.Flag('+') - preferOffset := f.Flag('#') - // we should always have a uri, simplify if it is file format - //TODO: make sure the end of the uri is unambiguous - uri := string(s.v.URI) - if c == 'f' { - uri = path.Base(uri) - } else if !fullForm { - uri = s.v.URI.Filename() - } - fmt.Fprint(f, uri) - if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) { - return - } - // see which bits of start to write - printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition()) - printLine := s.HasPosition() && (fullForm || !printOffset) - printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1)) - fmt.Fprint(f, ":") - if printLine { - fmt.Fprintf(f, "%d", s.v.Start.Line) - } - if printColumn { - fmt.Fprintf(f, ":%d", s.v.Start.Column) - } - if printOffset { - fmt.Fprintf(f, "#%d", s.v.Start.Offset) - } - // start is written, do we need end? - if s.IsPoint() { - return - } - // we don't print the line if it did not change - printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line) - fmt.Fprint(f, "-") - if printLine { - fmt.Fprintf(f, "%d", s.v.End.Line) - } - if printColumn { - if printLine { - fmt.Fprint(f, ":") - } - fmt.Fprintf(f, "%d", s.v.End.Column) - } - if printOffset { - fmt.Fprintf(f, "#%d", s.v.End.Offset) - } -} - -func (s Span) WithPosition(c Converter) (Span, error) { - if err := s.update(c, true, false); err != nil { - return Span{}, err - } - return s, nil -} - -func (s Span) WithOffset(c Converter) (Span, error) { - if err := s.update(c, false, true); err != nil { - return Span{}, err - } - return s, nil -} - -func (s Span) WithAll(c Converter) (Span, error) { - if err := s.update(c, true, true); err != nil { - return Span{}, err - } - return s, nil -} - -func (s *Span) update(c Converter, withPos, withOffset bool) error { - if !s.IsValid() { - return fmt.Errorf("cannot add information to an invalid span") - } - if withPos && !s.HasPosition() { - if err := s.v.Start.updatePosition(c); err != nil { - return err - } - if s.v.End.Offset == s.v.Start.Offset { - s.v.End = s.v.Start - } else if err := s.v.End.updatePosition(c); err != nil { - return err - } - } - if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) { - if err := s.v.Start.updateOffset(c); err != nil { - return err - } - if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column { - s.v.End.Offset = s.v.Start.Offset - } else if err := s.v.End.updateOffset(c); err != nil { - return err - } - } - return nil -} - -func (p *point) updatePosition(c Converter) error { - line, col, err := c.ToPosition(p.Offset) - if err != nil { - return err - } - p.Line = line - p.Column = col - return nil -} - -func (p *point) updateOffset(c Converter) error { - offset, err := c.ToOffset(p.Line, p.Column) - if err != nil { - return err - } - p.Offset = offset - return nil -} diff --git a/vendor/golang.org/x/tools/internal/span/token.go b/vendor/golang.org/x/tools/internal/span/token.go deleted file mode 100644 index ce44541b2fc49..0000000000000 --- a/vendor/golang.org/x/tools/internal/span/token.go +++ /dev/null @@ -1,151 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. 
-// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "fmt" - "go/token" -) - -// Range represents a source code range in token.Pos form. -// It also carries the FileSet that produced the positions, so that it is -// self contained. -type Range struct { - FileSet *token.FileSet - Start token.Pos - End token.Pos -} - -// TokenConverter is a Converter backed by a token file set and file. -// It uses the file set methods to work out the conversions, which -// makes it fast and does not require the file contents. -type TokenConverter struct { - fset *token.FileSet - file *token.File -} - -// NewRange creates a new Range from a FileSet and two positions. -// To represent a point pass a 0 as the end pos. -func NewRange(fset *token.FileSet, start, end token.Pos) Range { - return Range{ - FileSet: fset, - Start: start, - End: end, - } -} - -// NewTokenConverter returns an implementation of Converter backed by a -// token.File. -func NewTokenConverter(fset *token.FileSet, f *token.File) *TokenConverter { - return &TokenConverter{fset: fset, file: f} -} - -// NewContentConverter returns an implementation of Converter for the -// given file content. -func NewContentConverter(filename string, content []byte) *TokenConverter { - fset := token.NewFileSet() - f := fset.AddFile(filename, -1, len(content)) - f.SetLinesForContent(content) - return &TokenConverter{fset: fset, file: f} -} - -// IsPoint returns true if the range represents a single point. -func (r Range) IsPoint() bool { - return r.Start == r.End -} - -// Span converts a Range to a Span that represents the Range. -// It will fill in all the members of the Span, calculating the line and column -// information. -func (r Range) Span() (Span, error) { - f := r.FileSet.File(r.Start) - if f == nil { - return Span{}, fmt.Errorf("file not found in FileSet") - } - s := Span{v: span{URI: FileURI(f.Name())}} - var err error - s.v.Start.Offset, err = offset(f, r.Start) - if err != nil { - return Span{}, err - } - if r.End.IsValid() { - s.v.End.Offset, err = offset(f, r.End) - if err != nil { - return Span{}, err - } - } - s.v.Start.clean() - s.v.End.clean() - s.v.clean() - converter := NewTokenConverter(r.FileSet, f) - return s.WithPosition(converter) -} - -// offset is a copy of the Offset function in go/token, but with the adjustment -// that it does not panic on invalid positions. -func offset(f *token.File, pos token.Pos) (int, error) { - if int(pos) < f.Base() || int(pos) > f.Base()+f.Size() { - return 0, fmt.Errorf("invalid pos") - } - return int(pos) - f.Base(), nil -} - -// Range converts a Span to a Range that represents the Span for the supplied -// File. -func (s Span) Range(converter *TokenConverter) (Range, error) { - s, err := s.WithOffset(converter) - if err != nil { - return Range{}, err - } - // go/token will panic if the offset is larger than the file's size, - // so check here to avoid panicking. 
- if s.Start().Offset() > converter.file.Size() { - return Range{}, fmt.Errorf("start offset %v is past the end of the file %v", s.Start(), converter.file.Size()) - } - if s.End().Offset() > converter.file.Size() { - return Range{}, fmt.Errorf("end offset %v is past the end of the file %v", s.End(), converter.file.Size()) - } - return Range{ - FileSet: converter.fset, - Start: converter.file.Pos(s.Start().Offset()), - End: converter.file.Pos(s.End().Offset()), - }, nil -} - -func (l *TokenConverter) ToPosition(offset int) (int, int, error) { - if offset > l.file.Size() { - return 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, l.file.Size()) - } - pos := l.file.Pos(offset) - p := l.fset.Position(pos) - if offset == l.file.Size() { - return p.Line + 1, 1, nil - } - return p.Line, p.Column, nil -} - -func (l *TokenConverter) ToOffset(line, col int) (int, error) { - if line < 0 { - return -1, fmt.Errorf("line is not valid") - } - lineMax := l.file.LineCount() + 1 - if line > lineMax { - return -1, fmt.Errorf("line is beyond end of file %v", lineMax) - } else if line == lineMax { - if col > 1 { - return -1, fmt.Errorf("column is beyond end of file") - } - // at the end of the file, allowing for a trailing eol - return l.file.Size(), nil - } - pos := lineStart(l.file, line) - if !pos.IsValid() { - return -1, fmt.Errorf("line is not in file") - } - // we assume that column is in bytes here, and that the first byte of a - // line is at column 1 - pos += token.Pos(col - 1) - return offset(l.file, pos) -} diff --git a/vendor/golang.org/x/tools/internal/span/token111.go b/vendor/golang.org/x/tools/internal/span/token111.go deleted file mode 100644 index bf7a5406b6e0e..0000000000000 --- a/vendor/golang.org/x/tools/internal/span/token111.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.12 - -package span - -import ( - "go/token" -) - -// lineStart is the pre-Go 1.12 version of (*token.File).LineStart. For Go -// versions <= 1.11, we borrow logic from the analysisutil package. -// TODO(rstambler): Delete this file when we no longer support Go 1.11. -func lineStart(f *token.File, line int) token.Pos { - // Use binary search to find the start offset of this line. - - min := 0 // inclusive - max := f.Size() // exclusive - for { - offset := (min + max) / 2 - pos := f.Pos(offset) - posn := f.Position(pos) - if posn.Line == line { - return pos - (token.Pos(posn.Column) - 1) - } - - if min+1 >= max { - return token.NoPos - } - - if posn.Line < line { - min = offset - } else { - max = offset - } - } -} diff --git a/vendor/golang.org/x/tools/internal/span/token112.go b/vendor/golang.org/x/tools/internal/span/token112.go deleted file mode 100644 index 017aec9c13eec..0000000000000 --- a/vendor/golang.org/x/tools/internal/span/token112.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.12 - -package span - -import ( - "go/token" -) - -// TODO(rstambler): Delete this file when we no longer support Go 1.11. 
-func lineStart(f *token.File, line int) token.Pos { - return f.LineStart(line) -} diff --git a/vendor/golang.org/x/tools/internal/span/uri.go b/vendor/golang.org/x/tools/internal/span/uri.go deleted file mode 100644 index e05a9e6ef5df8..0000000000000 --- a/vendor/golang.org/x/tools/internal/span/uri.go +++ /dev/null @@ -1,152 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "fmt" - "net/url" - "os" - "path" - "path/filepath" - "runtime" - "strings" - "unicode" -) - -const fileScheme = "file" - -// URI represents the full URI for a file. -type URI string - -// Filename returns the file path for the given URI. -// It is an error to call this on a URI that is not a valid filename. -func (uri URI) Filename() string { - filename, err := filename(uri) - if err != nil { - panic(err) - } - return filepath.FromSlash(filename) -} - -func filename(uri URI) (string, error) { - if uri == "" { - return "", nil - } - u, err := url.ParseRequestURI(string(uri)) - if err != nil { - return "", err - } - if u.Scheme != fileScheme { - return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri) - } - if isWindowsDriveURI(u.Path) { - u.Path = u.Path[1:] - } - return u.Path, nil -} - -// NewURI returns a span URI for the string. -// It will attempt to detect if the string is a file path or uri. -func NewURI(s string) URI { - if u, err := url.PathUnescape(s); err == nil { - s = u - } - if strings.HasPrefix(s, fileScheme+"://") { - return URI(s) - } - return FileURI(s) -} - -func CompareURI(a, b URI) int { - if equalURI(a, b) { - return 0 - } - if a < b { - return -1 - } - return 1 -} - -func equalURI(a, b URI) bool { - if a == b { - return true - } - // If we have the same URI basename, we may still have the same file URIs. - if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) { - return false - } - fa, err := filename(a) - if err != nil { - return false - } - fb, err := filename(b) - if err != nil { - return false - } - // Stat the files to check if they are equal. - infoa, err := os.Stat(filepath.FromSlash(fa)) - if err != nil { - return false - } - infob, err := os.Stat(filepath.FromSlash(fb)) - if err != nil { - return false - } - return os.SameFile(infoa, infob) -} - -// FileURI returns a span URI for the supplied file path. -// It will always have the file scheme. -func FileURI(path string) URI { - if path == "" { - return "" - } - // Handle standard library paths that contain the literal "$GOROOT". - // TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT. - const prefix = "$GOROOT" - if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) { - suffix := path[len(prefix):] - path = runtime.GOROOT() + suffix - } - if !isWindowsDrivePath(path) { - if abs, err := filepath.Abs(path); err == nil { - path = abs - } - } - // Check the file path again, in case it became absolute. - if isWindowsDrivePath(path) { - path = "/" + path - } - path = filepath.ToSlash(path) - u := url.URL{ - Scheme: fileScheme, - Path: path, - } - uri := u.String() - if unescaped, err := url.PathUnescape(uri); err == nil { - uri = unescaped - } - return URI(uri) -} - -// isWindowsDrivePath returns true if the file path is of the form used by -// Windows. We check if the path begins with a drive letter, followed by a ":". 
-func isWindowsDrivePath(path string) bool { - if len(path) < 4 { - return false - } - return unicode.IsLetter(rune(path[0])) && path[1] == ':' -} - -// isWindowsDriveURI returns true if the file URI is of the format used by -// Windows URIs. The url.Parse package does not specially handle Windows paths -// (see https://golang.org/issue/6027). We check if the URI path has -// a drive prefix (e.g. "/C:"). If so, we trim the leading "/". -func isWindowsDriveURI(uri string) bool { - if len(uri) < 4 { - return false - } - return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':' -} diff --git a/vendor/golang.org/x/tools/internal/span/utf16.go b/vendor/golang.org/x/tools/internal/span/utf16.go deleted file mode 100644 index 561b3fa50a835..0000000000000 --- a/vendor/golang.org/x/tools/internal/span/utf16.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package span - -import ( - "fmt" - "unicode/utf16" - "unicode/utf8" -) - -// ToUTF16Column calculates the utf16 column expressed by the point given the -// supplied file contents. -// This is used to convert from the native (always in bytes) column -// representation and the utf16 counts used by some editors. -func ToUTF16Column(p Point, content []byte) (int, error) { - if content == nil { - return -1, fmt.Errorf("ToUTF16Column: missing content") - } - if !p.HasPosition() { - return -1, fmt.Errorf("ToUTF16Column: point is missing position") - } - if !p.HasOffset() { - return -1, fmt.Errorf("ToUTF16Column: point is missing offset") - } - offset := p.Offset() // 0-based - colZero := p.Column() - 1 // 0-based - if colZero == 0 { - // 0-based column 0, so it must be chr 1 - return 1, nil - } else if colZero < 0 { - return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero) - } - // work out the offset at the start of the line using the column - lineOffset := offset - colZero - if lineOffset < 0 || offset > len(content) { - return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content)) - } - // Use the offset to pick out the line start. - // This cannot panic: offset > len(content) and lineOffset < offset. - start := content[lineOffset:] - - // Now, truncate down to the supplied column. - start = start[:colZero] - - // and count the number of utf16 characters - // in theory we could do this by hand more efficiently... - return len(utf16.Encode([]rune(string(start)))) + 1, nil -} - -// FromUTF16Column advances the point by the utf16 character offset given the -// supplied line contents. -// This is used to convert from the utf16 counts used by some editors to the -// native (always in bytes) column representation. 
-func FromUTF16Column(p Point, chr int, content []byte) (Point, error) { - if !p.HasOffset() { - return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset") - } - // if chr is 1 then no adjustment needed - if chr <= 1 { - return p, nil - } - if p.Offset() >= len(content) { - return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content)) - } - remains := content[p.Offset():] - // scan forward the specified number of characters - for count := 1; count < chr; count++ { - if len(remains) <= 0 { - return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content") - } - r, w := utf8.DecodeRune(remains) - if r == '\n' { - // Per the LSP spec: - // - // > If the character value is greater than the line length it - // > defaults back to the line length. - break - } - remains = remains[w:] - if r >= 0x10000 { - // a two point rune - count++ - // if we finished in a two point rune, do not advance past the first - if count >= chr { - break - } - } - p.v.Column += w - p.v.Offset += w - } - return p, nil -} diff --git a/vendor/golang.org/x/xerrors/LICENSE b/vendor/golang.org/x/xerrors/LICENSE new file mode 100644 index 0000000000000..e4a47e17f143b --- /dev/null +++ b/vendor/golang.org/x/xerrors/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2019 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/xerrors/PATENTS b/vendor/golang.org/x/xerrors/PATENTS new file mode 100644 index 0000000000000..733099041f84f --- /dev/null +++ b/vendor/golang.org/x/xerrors/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. 
+ +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/xerrors/README b/vendor/golang.org/x/xerrors/README new file mode 100644 index 0000000000000..aac7867a560b8 --- /dev/null +++ b/vendor/golang.org/x/xerrors/README @@ -0,0 +1,2 @@ +This repository holds the transition packages for the new Go 1.13 error values. +See golang.org/design/29934-error-values. diff --git a/vendor/golang.org/x/xerrors/adaptor.go b/vendor/golang.org/x/xerrors/adaptor.go new file mode 100644 index 0000000000000..4317f24833131 --- /dev/null +++ b/vendor/golang.org/x/xerrors/adaptor.go @@ -0,0 +1,193 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strconv" +) + +// FormatError calls the FormatError method of f with an errors.Printer +// configured according to s and verb, and writes the result to s. +func FormatError(f Formatter, s fmt.State, verb rune) { + // Assuming this function is only called from the Format method, and given + // that FormatError takes precedence over Format, it cannot be called from + // any package that supports errors.Formatter. It is therefore safe to + // disregard that State may be a specific printer implementation and use one + // of our choice instead. + + // limitations: does not support printing error as Go struct. + + var ( + sep = " " // separator before next error + p = &state{State: s} + direct = true + ) + + var err error = f + + switch verb { + // Note that this switch must match the preference order + // for ordinary string printing (%#v before %+v, and so on). + + case 'v': + if s.Flag('#') { + if stringer, ok := err.(fmt.GoStringer); ok { + io.WriteString(&p.buf, stringer.GoString()) + goto exit + } + // proceed as if it were %v + } else if s.Flag('+') { + p.printDetail = true + sep = "\n - " + } + case 's': + case 'q', 'x', 'X': + // Use an intermediate buffer in the rare cases that precision, + // truncation, or one of the alternative verbs (q, x, and X) are + // specified. 
+ direct = false + + default: + p.buf.WriteString("%!") + p.buf.WriteRune(verb) + p.buf.WriteByte('(') + switch { + case err != nil: + p.buf.WriteString(reflect.TypeOf(f).String()) + default: + p.buf.WriteString("<nil>") + } + p.buf.WriteByte(')') + io.Copy(s, &p.buf) + return + } + +loop: + for { + switch v := err.(type) { + case Formatter: + err = v.FormatError((*printer)(p)) + case fmt.Formatter: + v.Format(p, 'v') + break loop + default: + io.WriteString(&p.buf, v.Error()) + break loop + } + if err == nil { + break + } + if p.needColon || !p.printDetail { + p.buf.WriteByte(':') + p.needColon = false + } + p.buf.WriteString(sep) + p.inDetail = false + p.needNewline = false + } + +exit: + width, okW := s.Width() + prec, okP := s.Precision() + + if !direct || (okW && width > 0) || okP { + // Construct format string from State s. + format := []byte{'%'} + if s.Flag('-') { + format = append(format, '-') + } + if s.Flag('+') { + format = append(format, '+') + } + if s.Flag(' ') { + format = append(format, ' ') + } + if okW { + format = strconv.AppendInt(format, int64(width), 10) + } + if okP { + format = append(format, '.') + format = strconv.AppendInt(format, int64(prec), 10) + } + format = append(format, string(verb)...) + fmt.Fprintf(s, string(format), p.buf.String()) + } else { + io.Copy(s, &p.buf) + } +} + +var detailSep = []byte("\n ") + +// state tracks error printing state. It implements fmt.State. +type state struct { + fmt.State + buf bytes.Buffer + + printDetail bool + inDetail bool + needColon bool + needNewline bool +} + +func (s *state) Write(b []byte) (n int, err error) { + if s.printDetail { + if len(b) == 0 { + return 0, nil + } + if s.inDetail && s.needColon { + s.needNewline = true + if b[0] == '\n' { + b = b[1:] + } + } + k := 0 + for i, c := range b { + if s.needNewline { + if s.inDetail && s.needColon { + s.buf.WriteByte(':') + s.needColon = false + } + s.buf.Write(detailSep) + s.needNewline = false + } + if c == '\n' { + s.buf.Write(b[k:i]) + k = i + 1 + s.needNewline = true + } + } + s.buf.Write(b[k:]) + if !s.inDetail { + s.needColon = true + } + } else if !s.inDetail { + s.buf.Write(b) + } + return len(b), nil +} + +// printer wraps a state to implement an xerrors.Printer. +type printer state + +func (s *printer) Print(args ...interface{}) { + if !s.inDetail || s.printDetail { + fmt.Fprint((*state)(s), args...) + } +} + +func (s *printer) Printf(format string, args ...interface{}) { + if !s.inDetail || s.printDetail { + fmt.Fprintf((*state)(s), format, args...) + } +} + +func (s *printer) Detail() bool { + s.inDetail = true + return s.printDetail +} diff --git a/vendor/golang.org/x/xerrors/codereview.cfg b/vendor/golang.org/x/xerrors/codereview.cfg new file mode 100644 index 0000000000000..3f8b14b64e83f --- /dev/null +++ b/vendor/golang.org/x/xerrors/codereview.cfg @@ -0,0 +1 @@ +issuerepo: golang/go diff --git a/vendor/golang.org/x/xerrors/doc.go b/vendor/golang.org/x/xerrors/doc.go new file mode 100644 index 0000000000000..eef99d9d54d74 --- /dev/null +++ b/vendor/golang.org/x/xerrors/doc.go @@ -0,0 +1,22 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xerrors implements functions to manipulate errors.
+// +// This package is based on the Go 2 proposal for error values: +// https://golang.org/design/29934-error-values +// +// These functions were incorporated into the standard library's errors package +// in Go 1.13: +// - Is +// - As +// - Unwrap +// +// Also, Errorf's %w verb was incorporated into fmt.Errorf. +// +// Use this package to get equivalent behavior in all supported Go versions. +// +// No other features of this package were included in Go 1.13, and at present +// there are no plans to include any of them. +package xerrors // import "golang.org/x/xerrors" diff --git a/vendor/golang.org/x/xerrors/errors.go b/vendor/golang.org/x/xerrors/errors.go new file mode 100644 index 0000000000000..e88d3772d8611 --- /dev/null +++ b/vendor/golang.org/x/xerrors/errors.go @@ -0,0 +1,33 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import "fmt" + +// errorString is a trivial implementation of error. +type errorString struct { + s string + frame Frame +} + +// New returns an error that formats as the given text. +// +// The returned error contains a Frame set to the caller's location and +// implements Formatter to show this information when printed with details. +func New(text string) error { + return &errorString{text, Caller(1)} +} + +func (e *errorString) Error() string { + return e.s +} + +func (e *errorString) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *errorString) FormatError(p Printer) (next error) { + p.Print(e.s) + e.frame.Format(p) + return nil +} diff --git a/vendor/golang.org/x/xerrors/fmt.go b/vendor/golang.org/x/xerrors/fmt.go new file mode 100644 index 0000000000000..829862ddf6af1 --- /dev/null +++ b/vendor/golang.org/x/xerrors/fmt.go @@ -0,0 +1,187 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "fmt" + "strings" + "unicode" + "unicode/utf8" + + "golang.org/x/xerrors/internal" +) + +const percentBangString = "%!" + +// Errorf formats according to a format specifier and returns the string as a +// value that satisfies error. +// +// The returned error includes the file and line number of the caller when +// formatted with additional detail enabled. If the last argument is an error +// the returned error's Format method will return it if the format string ends +// with ": %s", ": %v", or ": %w". If the last argument is an error and the +// format string ends with ": %w", the returned error implements an Unwrap +// method returning it. +// +// If the format specifier includes a %w verb with an error operand in a +// position other than at the end, the returned error will still implement an +// Unwrap method returning the operand, but the error's Format method will not +// return the wrapped error. +// +// It is invalid to include more than one %w verb or to supply it with an +// operand that does not implement the error interface. The %w verb is otherwise +// a synonym for %v. +func Errorf(format string, a ...interface{}) error { + format = formatPlusW(format) + // Support a ": %[wsv]" suffix, which works well with xerrors.Formatter. 
+ wrap := strings.HasSuffix(format, ": %w") + idx, format2, ok := parsePercentW(format) + percentWElsewhere := !wrap && idx >= 0 + if !percentWElsewhere && (wrap || strings.HasSuffix(format, ": %s") || strings.HasSuffix(format, ": %v")) { + err := errorAt(a, len(a)-1) + if err == nil { + return &noWrapError{fmt.Sprintf(format, a...), nil, Caller(1)} + } + // TODO: this is not entirely correct. The error value could be + // printed elsewhere in format if it mixes numbered with unnumbered + // substitutions. With relatively small changes to doPrintf we can + // have it optionally ignore extra arguments and pass the argument + // list in its entirety. + msg := fmt.Sprintf(format[:len(format)-len(": %s")], a[:len(a)-1]...) + frame := Frame{} + if internal.EnableTrace { + frame = Caller(1) + } + if wrap { + return &wrapError{msg, err, frame} + } + return &noWrapError{msg, err, frame} + } + // Support %w anywhere. + // TODO: don't repeat the wrapped error's message when %w occurs in the middle. + msg := fmt.Sprintf(format2, a...) + if idx < 0 { + return &noWrapError{msg, nil, Caller(1)} + } + err := errorAt(a, idx) + if !ok || err == nil { + // Too many %ws or argument of %w is not an error. Approximate the Go + // 1.13 fmt.Errorf message. + return &noWrapError{fmt.Sprintf("%sw(%s)", percentBangString, msg), nil, Caller(1)} + } + frame := Frame{} + if internal.EnableTrace { + frame = Caller(1) + } + return &wrapError{msg, err, frame} +} + +func errorAt(args []interface{}, i int) error { + if i < 0 || i >= len(args) { + return nil + } + err, ok := args[i].(error) + if !ok { + return nil + } + return err +} + +// formatPlusW is used to avoid the vet check that will barf at %w. +func formatPlusW(s string) string { + return s +} + +// Return the index of the only %w in format, or -1 if none. +// Also return a rewritten format string with %w replaced by %v, and +// false if there is more than one %w. +// TODO: handle "%[N]w". +func parsePercentW(format string) (idx int, newFormat string, ok bool) { + // Loosely copied from golang.org/x/tools/go/analysis/passes/printf/printf.go. + idx = -1 + ok = true + n := 0 + sz := 0 + var isW bool + for i := 0; i < len(format); i += sz { + if format[i] != '%' { + sz = 1 + continue + } + // "%%" is not a format directive. + if i+1 < len(format) && format[i+1] == '%' { + sz = 2 + continue + } + sz, isW = parsePrintfVerb(format[i:]) + if isW { + if idx >= 0 { + ok = false + } else { + idx = n + } + // "Replace" the last character, the 'w', with a 'v'. + p := i + sz - 1 + format = format[:p] + "v" + format[p+1:] + } + n++ + } + return idx, format, ok +} + +// Parse the printf verb starting with a % at s[0]. +// Return how many bytes it occupies and whether the verb is 'w'. +func parsePrintfVerb(s string) (int, bool) { + // Assume only that the directive is a sequence of non-letters followed by a single letter. 
+ sz := 0 + var r rune + for i := 1; i < len(s); i += sz { + r, sz = utf8.DecodeRuneInString(s[i:]) + if unicode.IsLetter(r) { + return i + sz, r == 'w' + } + } + return len(s), false +} + +type noWrapError struct { + msg string + err error + frame Frame +} + +func (e *noWrapError) Error() string { + return fmt.Sprint(e) +} + +func (e *noWrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *noWrapError) FormatError(p Printer) (next error) { + p.Print(e.msg) + e.frame.Format(p) + return e.err +} + +type wrapError struct { + msg string + err error + frame Frame +} + +func (e *wrapError) Error() string { + return fmt.Sprint(e) +} + +func (e *wrapError) Format(s fmt.State, v rune) { FormatError(e, s, v) } + +func (e *wrapError) FormatError(p Printer) (next error) { + p.Print(e.msg) + e.frame.Format(p) + return e.err +} + +func (e *wrapError) Unwrap() error { + return e.err +} diff --git a/vendor/golang.org/x/xerrors/format.go b/vendor/golang.org/x/xerrors/format.go new file mode 100644 index 0000000000000..1bc9c26b97fdf --- /dev/null +++ b/vendor/golang.org/x/xerrors/format.go @@ -0,0 +1,34 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +// A Formatter formats error messages. +type Formatter interface { + error + + // FormatError prints the receiver's first error and returns the next error in + // the error chain, if any. + FormatError(p Printer) (next error) +} + +// A Printer formats error messages. +// +// The most common implementation of Printer is the one provided by package fmt +// during Printf (as of Go 1.13). Localization packages such as golang.org/x/text/message +// typically provide their own implementations. +type Printer interface { + // Print appends args to the message output. + Print(args ...interface{}) + + // Printf writes a formatted string. + Printf(format string, args ...interface{}) + + // Detail reports whether error detail is requested. + // After the first call to Detail, all text written to the Printer + // is formatted as additional detail, or ignored when + // detail has not been requested. + // If Detail returns false, the caller can avoid printing the detail at all. + Detail() bool +} diff --git a/vendor/golang.org/x/xerrors/frame.go b/vendor/golang.org/x/xerrors/frame.go new file mode 100644 index 0000000000000..0de628ec501f6 --- /dev/null +++ b/vendor/golang.org/x/xerrors/frame.go @@ -0,0 +1,56 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "runtime" +) + +// A Frame contains part of a call stack. +type Frame struct { + // Make room for three PCs: the one we were asked for, what it called, + // and possibly a PC for skipPleaseUseCallersFrames. See: + // https://go.googlesource.com/go/+/032678e0fb/src/runtime/extern.go#169 + frames [3]uintptr +} + +// Caller returns a Frame that describes a frame on the caller's stack. +// The argument skip is the number of frames to skip over. +// Caller(0) returns the frame for the caller of Caller. +func Caller(skip int) Frame { + var s Frame + runtime.Callers(skip+1, s.frames[:]) + return s +} + +// location reports the file, line, and function of a frame. +// +// The returned function may be "" even if file and line are not. 
+func (f Frame) location() (function, file string, line int) { + frames := runtime.CallersFrames(f.frames[:]) + if _, ok := frames.Next(); !ok { + return "", "", 0 + } + fr, ok := frames.Next() + if !ok { + return "", "", 0 + } + return fr.Function, fr.File, fr.Line +} + +// Format prints the stack as error detail. +// It should be called from an error's Format implementation +// after printing any other error detail. +func (f Frame) Format(p Printer) { + if p.Detail() { + function, file, line := f.location() + if function != "" { + p.Printf("%s\n ", function) + } + if file != "" { + p.Printf("%s:%d\n", file, line) + } + } +} diff --git a/vendor/golang.org/x/xerrors/go.mod b/vendor/golang.org/x/xerrors/go.mod new file mode 100644 index 0000000000000..870d4f612dbf6 --- /dev/null +++ b/vendor/golang.org/x/xerrors/go.mod @@ -0,0 +1,3 @@ +module golang.org/x/xerrors + +go 1.11 diff --git a/vendor/golang.org/x/xerrors/internal/internal.go b/vendor/golang.org/x/xerrors/internal/internal.go new file mode 100644 index 0000000000000..89f4eca5df7bc --- /dev/null +++ b/vendor/golang.org/x/xerrors/internal/internal.go @@ -0,0 +1,8 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package internal + +// EnableTrace indicates whether stack information should be recorded in errors. +var EnableTrace = true diff --git a/vendor/golang.org/x/xerrors/wrap.go b/vendor/golang.org/x/xerrors/wrap.go new file mode 100644 index 0000000000000..9a3b510374ec8 --- /dev/null +++ b/vendor/golang.org/x/xerrors/wrap.go @@ -0,0 +1,106 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xerrors + +import ( + "reflect" +) + +// A Wrapper provides context around another error. +type Wrapper interface { + // Unwrap returns the next error in the error chain. + // If there is no next error, Unwrap returns nil. + Unwrap() error +} + +// Opaque returns an error with the same error formatting as err +// but that does not match err and cannot be unwrapped. +func Opaque(err error) error { + return noWrapper{err} +} + +type noWrapper struct { + error +} + +func (e noWrapper) FormatError(p Printer) (next error) { + if f, ok := e.error.(Formatter); ok { + return f.FormatError(p) + } + p.Print(e.error) + return nil +} + +// Unwrap returns the result of calling the Unwrap method on err, if err implements +// Unwrap. Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + u, ok := err.(Wrapper) + if !ok { + return nil + } + return u.Unwrap() +} + +// Is reports whether any error in err's chain matches target. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { + if target == nil { + return err == target + } + + isComparable := reflect.TypeOf(target).Comparable() + for { + if isComparable && err == target { + return true + } + if x, ok := err.(interface{ Is(error) bool }); ok && x.Is(target) { + return true + } + // TODO: consider supporing target.Is(err). This would allow + // user-definable predicates, but also may allow for coping with sloppy + // APIs, thereby making it easier to get away with them. 
+ if err = Unwrap(err); err == nil { + return false + } + } +} + +// As finds the first error in err's chain that matches the type to which target +// points, and if so, sets the target to its value and returns true. An error +// matches a type if it is assignable to the target type, or if it has a method +// As(interface{}) bool such that As(target) returns true. As will panic if target +// is not a non-nil pointer to a type which implements error or is of interface type. +// +// The As method should set the target to its value and return true if err +// matches the type to which target points. +func As(err error, target interface{}) bool { + if target == nil { + panic("errors: target cannot be nil") + } + val := reflect.ValueOf(target) + typ := val.Type() + if typ.Kind() != reflect.Ptr || val.IsNil() { + panic("errors: target must be a non-nil pointer") + } + if e := typ.Elem(); e.Kind() != reflect.Interface && !e.Implements(errorType) { + panic("errors: *target must be interface or implement error") + } + targetType := typ.Elem() + for err != nil { + if reflect.TypeOf(err).AssignableTo(targetType) { + val.Elem().Set(reflect.ValueOf(err)) + return true + } + if x, ok := err.(interface{ As(interface{}) bool }); ok && x.As(target) { + return true + } + err = Unwrap(err) + } + return false +} + +var errorType = reflect.TypeOf((*error)(nil)).Elem() diff --git a/vendor/modules.txt b/vendor/modules.txt index 4c2fc02c67d11..aad5152b65802 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -93,8 +93,6 @@ github.com/aws/aws-sdk-go/private/protocol/query/queryutil github.com/aws/aws-sdk-go/private/protocol/rest github.com/aws/aws-sdk-go/private/protocol/restxml github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil -github.com/aws/aws-sdk-go/service/applicationautoscaling -github.com/aws/aws-sdk-go/service/applicationautoscaling/applicationautoscalingiface github.com/aws/aws-sdk-go/service/dynamodb github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface github.com/aws/aws-sdk-go/service/ec2 @@ -130,7 +128,7 @@ github.com/coreos/go-systemd/sdjournal # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f github.com/coreos/pkg/capnslog github.com/coreos/pkg/dlopen -# github.com/cortexproject/cortex v0.7.1-0.20200316184320-acc42abdf56c +# github.com/cortexproject/cortex v1.0.0 github.com/cortexproject/cortex/pkg/chunk github.com/cortexproject/cortex/pkg/chunk/aws github.com/cortexproject/cortex/pkg/chunk/azure @@ -428,7 +426,7 @@ github.com/opentracing/opentracing-go/log # github.com/pierrec/lz4 v2.3.1-0.20191115212037-9085dacd1e1e+incompatible github.com/pierrec/lz4 github.com/pierrec/lz4/internal/xxh32 -# github.com/pkg/errors v0.8.1 +# github.com/pkg/errors v0.9.1 github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 github.com/pmezard/go-difflib/difflib @@ -521,6 +519,9 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require +# github.com/thanos-io/thanos v0.11.0 +github.com/thanos-io/thanos/pkg/discovery/dns +github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns # github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 github.com/tmc/grpc-websocket-proxy/wsproxy # github.com/tonistiigi/fifo v0.0.0-20190226154929-a9fb20d87448 @@ -686,7 +687,10 @@ golang.org/x/exp/cmd/apidiff # golang.org/x/lint v0.0.0-20190930215403-16217165b5de golang.org/x/lint golang.org/x/lint/golint -# golang.org/x/net v0.0.0-20191112182307-2180aed22343 => golang.org/x/net v0.0.0-20190923162816-aa69164e4478 +# 
golang.org/x/mod v0.2.0 +golang.org/x/mod/module +golang.org/x/mod/semver +# golang.org/x/net v0.0.0-20200226121028-0de0cce0169b => golang.org/x/net v0.0.0-20190923162816-aa69164e4478 golang.org/x/net/bpf golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -723,7 +727,7 @@ golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm # golang.org/x/time v0.0.0-20191024005414-555d28b269f0 golang.org/x/time/rate -# golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2 +# golang.org/x/tools v0.0.0-20200306191617-51e69f71924f golang.org/x/tools/cmd/goimports golang.org/x/tools/go/analysis golang.org/x/tools/go/analysis/passes/inspect @@ -737,11 +741,13 @@ golang.org/x/tools/go/packages golang.org/x/tools/go/types/objectpath golang.org/x/tools/go/types/typeutil golang.org/x/tools/internal/fastwalk +golang.org/x/tools/internal/gocommand golang.org/x/tools/internal/gopathwalk golang.org/x/tools/internal/imports -golang.org/x/tools/internal/module -golang.org/x/tools/internal/semver -golang.org/x/tools/internal/span +golang.org/x/tools/internal/packagesinternal +# golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 +golang.org/x/xerrors +golang.org/x/xerrors/internal # google.golang.org/api v0.14.0 google.golang.org/api/cloudresourcemanager/v1 google.golang.org/api/compute/v1
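The modules.txt changes above vendor golang.org/x/xerrors for the first time. As a minimal sketch of the behavior its fmt.go and wrap.go implement (the example itself is not part of the patch): Errorf with a trailing ": %w" records the operand for Unwrap, Is follows that chain, and %+v prints the frame captured by Caller as detail.

package main

import (
	"fmt"

	"golang.org/x/xerrors"
)

var errNotReady = xerrors.New("store not ready")

func openStore() error {
	// A trailing ": %w" both formats the cause and records it for Unwrap.
	return xerrors.Errorf("opening store: %w", errNotReady)
}

func main() {
	err := openStore()
	fmt.Println(err)                          // opening store: store not ready
	fmt.Println(xerrors.Is(err, errNotReady)) // true: Is walks the Unwrap chain
	fmt.Printf("%+v\n", err)                  // adds the recorded frames as detail
}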