From 49603c91f71270ac5bfd23ac051eb14c1ff625f5 Mon Sep 17 00:00:00 2001
From: Martin Disibio
Date: Mon, 8 Feb 2021 12:43:43 -0500
Subject: [PATCH 1/6] Update cortex dependency to latest 1.7.0+ commit
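
Apart from go.mod/go.sum, the non-vendor churn here is mechanical: Cortex
1.7.0+ moves its shared logging helpers from pkg/util into the new
pkg/util/log package, so every Tempo call site swaps util.Logger and
util.InitLogger for log.Logger and log.InitLogger. A minimal sketch of the
new call shape, assuming only the Cortex and go-kit packages this repo
already vendors (illustrative only, not part of the diff):

    package main

    import (
        // was "github.com/cortexproject/cortex/pkg/util"
        "github.com/cortexproject/cortex/pkg/util/log"

        "github.com/go-kit/kit/log/level"
    )

    func main() {
        // The process-wide logger now lives in pkg/util/log; only the
        // import path and package qualifier change, not the go-kit API.
        level.Info(log.Logger).Log("msg", "Tempo started")
    }

Everything under vendor/ below is the corresponding dependency refresh.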
---
 cmd/tempo/app/app.go | 25 +-
 cmd/tempo/app/modules.go | 14 +-
 cmd/tempo/main.go | 18 +-
 go.mod | 25 +-
 go.sum | 118 +-
 modules/compactor/compactor.go | 20 +-
 modules/distributor/distributor.go | 4 +-
 modules/distributor/distributor_test.go | 8 +-
 modules/distributor/receiver/shim.go | 6 +-
 modules/ingester/client/client.go | 8 +-
 modules/ingester/flush.go | 16 +-
 modules/ingester/ingester.go | 8 +-
 modules/ingester/instance.go | 6 +-
 modules/querier/config.go | 11 +-
 modules/querier/querier.go | 12 +-
 pkg/ring/ring.go | 6 +-
 pkg/util/log_test.go | 4 +-
 pkg/util/trace.go | 4 +-
 tempodb/backend/s3/s3.go | 2 +-
 tempodb/tempodb.go | 4 +-
 vendor/cloud.google.com/go/.gitignore | 1 +
 vendor/cloud.google.com/go/CHANGES.md | 93 +
 vendor/cloud.google.com/go/CONTRIBUTING.md | 5 +-
 vendor/cloud.google.com/go/RELEASING.md | 65 +-
 vendor/cloud.google.com/go/go.mod | 20 +-
 vendor/cloud.google.com/go/go.sum | 48 +-
 .../go/internal/.repo-metadata-full.json | 74 +-
 .../go/internal/version/version.go | 2 +-
 .../go/longrunning/autogen/doc.go | 2 +-
 .../longrunning/autogen/operations_client.go | 4 +-
 vendor/cloud.google.com/go/testing.md | 236 +
 vendor/github.com/alecthomas/units/go.mod | 2 +
 vendor/github.com/armon/go-metrics/.gitignore | 2 +
 vendor/github.com/armon/go-metrics/metrics.go | 4 +-
 .../armon/go-metrics/prometheus/prometheus.go | 298 +-
 vendor/github.com/armon/go-metrics/start.go | 7 +-
 .../aws/aws-sdk-go/aws/endpoints/defaults.go | 388 +-
 .../aws/session/custom_transport.go | 27 +
 ...ransport.go => custom_transport_go1.12.go} | 4 +-
 ...sport_1_5.go => custom_transport_go1.5.go} | 2 +-
 ...sport_1_6.go => custom_transport_go1.6.go} | 2 +-
 .../aws/aws-sdk-go/aws/session/doc.go | 27 +
 .../aws/aws-sdk-go/aws/session/env_config.go | 25 +-
 .../aws/aws-sdk-go/aws/session/session.go | 187 +-
 .../aws-sdk-go/aws/session/shared_config.go | 13 +
 .../github.com/aws/aws-sdk-go/aws/version.go | 2 +-
 .../aws/aws-sdk-go/service/dynamodb/api.go | 6534 +++--
 .../dynamodb/dynamodbiface/interface.go | 28 +-
 .../aws/aws-sdk-go/service/dynamodb/errors.go | 8 +
 .../aws/aws-sdk-go/service/s3/api.go | 401 +-
 vendor/github.com/blang/semver/.travis.yml | 21 -
 vendor/github.com/blang/semver/LICENSE | 22 -
 vendor/github.com/blang/semver/README.md | 194 -
 vendor/github.com/blang/semver/json.go | 23 -
 vendor/github.com/blang/semver/package.json | 17 -
 vendor/github.com/blang/semver/range.go | 416 -
 vendor/github.com/blang/semver/semver.go | 418 -
 vendor/github.com/blang/semver/sort.go | 28 -
 vendor/github.com/blang/semver/sql.go | 30 -
 .../cortex/integration/e2e/db/db.go | 14 +-
 .../cortex/integration/e2e/images/images.go | 2 +-
 .../cortex/integration/e2e/metrics.go | 10 +
 .../cortex/integration/e2e/service.go | 19 +-
 .../cortex/pkg/alertmanager/alertmanager.go | 74 +-
 .../pkg/alertmanager/alertmanager_http.go | 53 +
 .../pkg/alertmanager/alertmanager_metrics.go | 6 +
 .../pkg/alertmanager/alertmanager_ring.go | 114 +
 .../alertmanager/alerts/objectclient/store.go | 4 +-
 .../cortex/pkg/alertmanager/api.go | 8 +-
 .../cortex/pkg/alertmanager/lifecycle.go | 28 +
 .../cortex/pkg/alertmanager/multitenant.go | 443 +-
 .../cortex/pkg/alertmanager/storage.go | 25 +-
 .../cortexproject/cortex/pkg/api/api.go | 48 +-
 .../cortexproject/cortex/pkg/api/handlers.go | 40 +-
 .../cortex/pkg/api/middlewares.go | 4 +-
 .../pkg/chunk/aws/dynamodb_storage_client.go | 16 +-
 .../pkg/chunk/aws/dynamodb_table_client.go | 15 +-
 .../pkg/chunk/aws/metrics_autoscaling.go | 20 +-
 .../cortex/pkg/chunk/aws/mock.go | 4 +-
 .../cortex/pkg/chunk/aws/s3_storage_client.go | 61 +-
 .../cortex/pkg/chunk/aws/sse_config.go | 86 +
 .../pkg/chunk/azure/blob_storage_client.go | 3 +-
 .../cortex/pkg/chunk/cache/fifo_cache.go | 4 +-
 .../cortex/pkg/chunk/cache/memcached.go | 4 +-
 .../pkg/chunk/cache/memcached_client.go | 4 +-
 .../cortex/pkg/chunk/cache/redis_cache.go | 4 +-
 .../pkg/chunk/cassandra/storage_client.go | 6 +-
 .../cortex/pkg/chunk/chunk_store.go | 3 +-
 .../cortex/pkg/chunk/chunk_store_utils.go | 4 +-
 .../cortex/pkg/chunk/composite_store.go | 8 +-
 .../pkg/chunk/gcp/bigtable_index_client.go | 20 +-
 .../pkg/chunk/gcp/bigtable_object_client.go | 11 +-
 .../cortex/pkg/chunk/gcp/table_client.go | 7 +-
 .../pkg/chunk/inmemory_storage_client.go | 6 +-
 .../pkg/chunk/local/boltdb_index_client.go | 6 +-
 .../pkg/chunk/local/fs_object_client.go | 6 +-
 .../chunk/openstack/swift_object_client.go | 44 +-
 .../pkg/chunk/purger/blocks_purger_api.go | 7 +-
 .../cortex/pkg/chunk/purger/purger.go | 18 +-
 .../pkg/chunk/purger/request_handler.go | 11 +-
 .../cortex/pkg/chunk/purger/tombstones.go | 67 +-
 .../cortexproject/cortex/pkg/chunk/schema.go | 4 +-
 .../cortex/pkg/chunk/schema_config.go | 15 +-
 .../cortex/pkg/chunk/storage/factory.go | 10 +-
 .../cortex/pkg/chunk/table_manager.go | 28 +-
 .../cortex/pkg/chunk/util/util.go | 4 +-
 .../cortex/pkg/compactor/blocks_cleaner.go | 272 +-
 .../cortex/pkg/compactor/compactor.go | 249 +-
 .../cortex/pkg/compactor/compactor_http.go | 4 +-
 .../cortex/pkg/compactor/compactor_ring.go | 5 +-
 .../cortex/pkg/compactor/syncer_metrics.go | 5 +-
 .../cortex/pkg/configs/api/api.go | 13 +-
 .../cortex/pkg/configs/client/client.go | 4 +-
 .../pkg/configs/db/postgres/postgres.go | 11 +-
 .../cortex/pkg/configs/db/traced.go | 5 +-
 .../cortex/pkg/configs/userconfig/config.go | 6 +-
 .../cortexproject/cortex/pkg/cortex/cortex.go | 69 +-
 .../cortex/pkg/cortex/modules.go | 109 +-
 .../cortex/pkg/cortex/runtime_config.go | 61 +-
 .../cortex/pkg/cortex/server_service.go | 4 +-
 .../cortex/pkg/distributor/distributor.go | 188 +-
 .../pkg/distributor/distributor_ring.go | 4 +-
 .../cortex/pkg/distributor/ha_tracker.go | 118 +-
 .../cortex/pkg/distributor/query.go | 19 +-
 .../cortex/pkg/flusher/flusher.go | 8 +-
 .../cortex/pkg/frontend/transport/handler.go | 35 +-
 .../cortex/pkg/frontend/v1/frontend.go | 13 +-
 .../frontend/v1/frontendv1pb/frontend.pb.go | 107 +-
 .../frontend/v1/frontendv1pb/frontend.proto | 4 +
 .../cortex/pkg/frontend/v2/frontend.go | 28 +-
 .../frontend/v2/frontend_scheduler_worker.go | 1 +
 .../cortex/pkg/ingester/client/client.go | 2 +-
 .../cortex/pkg/ingester/client/cortex.pb.go | 319 +-
 .../cortex/pkg/ingester/client/cortex.proto | 5 +
 .../cortex/pkg/ingester/flush.go | 17 +-
 .../cortex/pkg/ingester/ingester.go | 54 +-
 .../cortex/pkg/ingester/ingester_v2.go | 293 +-
 .../cortex/pkg/ingester/limiter.go | 5 +-
 .../cortex/pkg/ingester/mapper.go | 12 +-
 .../cortex/pkg/ingester/metrics.go | 14 +
 .../cortex/pkg/ingester/transfer.go | 32 +-
 .../pkg/ingester/user_metrics_metadata.go | 4 +-
 .../cortex/pkg/ingester/user_state.go | 10 +-
 .../cortexproject/cortex/pkg/ingester/wal.go | 63 +-
 .../cortex/pkg/querier/astmapper/parallel.go | 4 +-
 .../pkg/querier/blocks_finder_bucket_index.go | 108 +
 ...canner.go => blocks_finder_bucket_scan.go} | 47 +-
 .../pkg/querier/blocks_store_balanced_set.go | 8 +-
 .../pkg/querier/blocks_store_queryable.go | 41 +-
 .../querier/blocks_store_replicated_set.go | 10 +-
 .../pkg/querier/distributor_queryable.go | 11 +-
 .../cortex/pkg/querier/querier.go | 23 +-
 .../cortex/pkg/querier/queryrange/limits.go | 7 +-
 .../pkg/querier/queryrange/query_range.go | 16 +-
 .../pkg/querier/queryrange/results_cache.go | 94 +-
 .../cortex/pkg/querier/queryrange/retry.go | 4 +-
 .../pkg/querier/queryrange/roundtrip.go | 4 +-
 .../cortex/pkg/querier/queryrange/util.go | 5 +-
 .../cortex/pkg/querier/remote_read.go | 11 +-
 .../cortex/pkg/querier/stats/stats.go | 7 +
 .../pkg/querier/stats/time_middleware.go | 5 +-
 .../pkg/querier/store_gateway_client.go | 29 +-
 .../tenantfederation/merge_queryable.go | 309 +
 .../tenantfederation/tenant_federation.go | 14 +
 .../pkg/querier/worker/frontend_processor.go | 22 +-
 .../pkg/querier/worker/scheduler_processor.go | 31 +-
 .../cortex/pkg/querier/worker/worker.go | 5 +-
 .../cortex/pkg/ring/basic_lifecycler.go | 22 +-
 .../pkg/ring/basic_lifecycler_delegates.go | 20 +-
 .../cortexproject/cortex/pkg/ring/batch.go | 41 +-
 .../cortex/pkg/ring/client/pool.go | 7 +-
 .../pkg/ring/client/ring_service_discovery.go | 5 +
 .../cortexproject/cortex/pkg/ring/http.go | 10 +-
 .../cortex/pkg/ring/kv/consul/client.go | 32 +-
 .../cortex/pkg/ring/kv/consul/mock.go | 20 +-
 .../cortex/pkg/ring/kv/etcd/etcd.go | 47 +-
 .../pkg/ring/kv/memberlist/broadcast.go | 4 +-
 .../pkg/ring/kv/memberlist/kv_init_service.go | 346 +-
 .../ring/kv/memberlist/memberlist_client.go | 157 +-
 .../ring/kv/memberlist/memberlist_logger.go | 2 +-
 .../cortex/pkg/ring/kv/memberlist/metrics.go | 4 +-
 .../cortex/pkg/ring/kv/metrics.go | 14 +-
 .../cortexproject/cortex/pkg/ring/kv/mock.go | 4 +-
 .../cortexproject/cortex/pkg/ring/kv/multi.go | 4 +-
 .../cortex/pkg/ring/lifecycler.go | 100 +-
 .../cortexproject/cortex/pkg/ring/model.go | 221 +-
 .../cortex/pkg/ring/replication_set.go | 8 +-
 .../pkg/ring/replication_set_tracker.go | 10 +-
 .../cortex/pkg/ring/replication_strategy.go | 99 +-
 .../cortexproject/cortex/pkg/ring/ring.go | 292 +-
 .../cortexproject/cortex/pkg/ring/ring.pb.go | 134 +-
 .../cortexproject/cortex/pkg/ring/ring.proto | 4 +-
 .../cortexproject/cortex/pkg/ring/util.go | 23 +-
 .../cortexproject/cortex/pkg/ruler/api.go | 16 +-
 .../cortexproject/cortex/pkg/ruler/compat.go | 36 +-
 .../cortex/pkg/ruler/lifecycle.go | 6 +-
 .../cortexproject/cortex/pkg/ruler/manager.go | 5 +-
 .../cortex/pkg/ruler/manager_metrics.go | 29 +-
 .../cortex/pkg/ruler/notifier.go | 39 +-
 .../cortexproject/cortex/pkg/ruler/ruler.go | 21 +-
 .../pkg/ruler/ruler_replication_strategy.go | 37 -
 .../cortex/pkg/ruler/ruler_ring.go | 13 +-
 .../ruler/rules/objectclient/rule_store.go | 12 +-
 .../cortex/pkg/scheduler/scheduler.go | 14 +-
 .../pkg/scheduler/schedulerpb/scheduler.pb.go | 178 +-
 .../pkg/scheduler/schedulerpb/scheduler.proto | 5 +
 .../cortex/pkg/storage/bucket/bucket_util.go | 33 +
 .../cortex/pkg/storage/bucket/client_mock.go | 10 +
 .../pkg/storage/bucket/s3/bucket_client.go | 5 +
 .../cortex/pkg/storage/bucket/s3/config.go | 10 +
 .../pkg/storage/bucket/swift/bucket_client.go | 11 +-
 .../cortex/pkg/storage/bucket/swift/config.go | 37 +-
 .../pkg/storage/tsdb/bucketindex/index.go | 69 +-
 .../pkg/storage/tsdb/bucketindex/loader.go | 275 +
 .../pkg/storage/tsdb/bucketindex/markers.go | 51 +
 .../tsdb/bucketindex/markers_bucket_client.go | 18 +
 .../pkg/storage/tsdb/bucketindex/reader.go | 50 -
 .../pkg/storage/tsdb/bucketindex/storage.go | 92 +
 .../bucketindex/{writer.go => updater.go} | 106 +-
 .../cortex/pkg/storage/tsdb/caching_bucket.go | 44 +-
 .../cortex/pkg/storage/tsdb/config.go | 34 +-
 .../pkg/storage/tsdb/tenant_deletion_mark.go | 44 +-
 .../bucket_index_metadata_fetcher.go | 236 +
 .../cortex/pkg/storegateway/bucket_stores.go | 94 +-
 .../cortex/pkg/storegateway/gateway.go | 12 +-
 .../cortex/pkg/storegateway/gateway_http.go | 4 +-
 .../cortex/pkg/storegateway/gateway_ring.go | 19 +-
 .../storegateway/metadata_fetcher_filters.go | 78 +
 .../storegateway/metadata_fetcher_metrics.go | 2 +
 .../pkg/storegateway/replication_strategy.go | 45 -
 .../pkg/storegateway/sharding_strategy.go | 5 +-
 .../cortexproject/cortex/pkg/tenant/tenant.go | 16 +
 .../cortex/pkg/util/active_user.go | 91 +
 .../cortexproject/cortex/pkg/util/config.go | 68 +
 .../cortex/pkg/util/flagext/deprecated.go | 4 +-
 .../cortex/pkg/util/grpcclient/grpcclient.go | 72 +-
 .../cortexproject/cortex/pkg/util/http.go | 197 +-
 .../cortex/pkg/util/{ => log}/experimental.go | 2 +-
 .../cortex/pkg/util/{ => log}/log.go | 56 +-
 .../cortex/pkg/util/log/wrappers.go | 53 +
 .../cortex/pkg/util/{ => math}/math.go | 2 +-
 .../cortex/pkg/util/metrics_helper.go | 108 +-
 .../cortex/pkg/util/module_service.go | 13 +-
 .../cortexproject/cortex/pkg/util/net.go | 8 +-
 .../cortex/pkg/util/push/push.go | 16 +-
 .../cortex/pkg/util/runtimeconfig/manager.go | 28 +-
 .../cortex/pkg/util/spanlogger/spanlogger.go | 10 +-
 .../cortexproject/cortex/pkg/util/strings.go | 9 +
 .../cortexproject/cortex/pkg/util/tls/tls.go | 22 +-
 .../cortex/pkg/util/validation/limits.go | 91 +-
 .../cortex/pkg/util/validation/validate.go | 21 +-
 .../cortexproject/cortex/pkg/util/yaml.go | 19 +
 .../github.com/golang/snappy/decode_arm64.s | 45 +-
 .../github.com/golang/snappy/encode_arm64.s | 81 +-
 .../github.com/google/go-cmp/cmp/compare.go | 6 +-
 .../google/go-cmp/cmp/export_panic.go | 2 +-
 .../google/go-cmp/cmp/export_unsafe.go | 2 +-
 .../go-cmp/cmp/internal/diff/debug_disable.go | 2 +-
 .../go-cmp/cmp/internal/diff/debug_enable.go | 2 +-
 .../google/go-cmp/cmp/internal/diff/diff.go | 50 +-
 .../google/go-cmp/cmp/internal/flags/flags.go | 2 +-
 .../cmp/internal/flags/toolchain_legacy.go | 2 +-
 .../cmp/internal/flags/toolchain_recent.go | 2 +-
 .../go-cmp/cmp/internal/function/func.go | 2 +-
 .../google/go-cmp/cmp/internal/value/name.go | 2 +-
 .../cmp/internal/value/pointer_purego.go | 2 +-
 .../cmp/internal/value/pointer_unsafe.go | 2 +-
 .../google/go-cmp/cmp/internal/value/sort.go | 2 +-
 .../google/go-cmp/cmp/internal/value/zero.go | 2 +-
 .../github.com/google/go-cmp/cmp/options.go | 2 +-
 vendor/github.com/google/go-cmp/cmp/path.go | 2 +-
 vendor/github.com/google/go-cmp/cmp/report.go | 2 +-
 .../google/go-cmp/cmp/report_compare.go | 2 +-
 .../google/go-cmp/cmp/report_references.go | 2 +-
 .../google/go-cmp/cmp/report_reflect.go | 4 +-
 .../google/go-cmp/cmp/report_slices.go | 2 +-
 .../google/go-cmp/cmp/report_text.go | 2 +-
 .../google/go-cmp/cmp/report_value.go | 2 +-
 .../github.com/google/pprof/profile/merge.go | 5 +-
 .../google/pprof/profile/profile.go | 18 +-
 vendor/github.com/google/uuid/README.md | 2 +-
 vendor/github.com/google/uuid/marshal.go | 7 +-
 vendor/github.com/google/uuid/version1.go | 12 +-
 vendor/github.com/google/uuid/version4.go | 7 +-
 .../gophercloud/gophercloud/.gitignore | 3 -
 .../gophercloud/gophercloud/.travis.yml | 25 -
 .../gophercloud/gophercloud/.zuul.yaml | 136 -
 .../gophercloud/gophercloud/CHANGELOG.md | 437 -
 .../gophercloud/gophercloud/LICENSE | 191 -
 .../gophercloud/gophercloud/README.md | 166 -
 .../gophercloud/gophercloud/auth_options.go | 514 -
 .../gophercloud/gophercloud/auth_result.go | 52 -
 .../github.com/gophercloud/gophercloud/doc.go | 110 -
 .../gophercloud/endpoint_search.go | 76 -
 .../gophercloud/gophercloud/errors.go | 490 -
 .../github.com/gophercloud/gophercloud/go.mod | 11 -
 .../github.com/gophercloud/gophercloud/go.sum | 19 -
 .../gophercloud/openstack/auth_env.go | 128 -
 .../gophercloud/openstack/client.go | 503 -
 .../gophercloud/gophercloud/openstack/doc.go | 14 -
 .../openstack/endpoint_location.go | 111 -
 .../gophercloud/openstack/errors.go | 47 -
 .../openstack/identity/v2/tenants/doc.go | 65 -
 .../openstack/identity/v2/tenants/requests.go | 120 -
 .../openstack/identity/v2/tenants/results.go | 91 -
 .../openstack/identity/v2/tenants/urls.go | 23 -
 .../openstack/identity/v2/tokens/doc.go | 46 -
 .../openstack/identity/v2/tokens/requests.go | 105 -
 .../openstack/identity/v2/tokens/results.go | 174 -
 .../openstack/identity/v2/tokens/urls.go | 13 -
 .../identity/v3/extensions/ec2tokens/doc.go | 41 -
 .../v3/extensions/ec2tokens/requests.go | 377 -
 .../identity/v3/extensions/ec2tokens/urls.go | 11 -
 .../identity/v3/extensions/oauth1/doc.go | 123 -
 .../identity/v3/extensions/oauth1/requests.go | 587 -
 .../identity/v3/extensions/oauth1/results.go | 305 -
 .../identity/v3/extensions/oauth1/urls.go | 43 -
 .../openstack/identity/v3/tokens/doc.go | 108 -
 .../openstack/identity/v3/tokens/requests.go | 174 -
 .../openstack/identity/v3/tokens/results.go | 194 -
 .../openstack/identity/v3/tokens/urls.go | 7 -
 .../objectstorage/v1/accounts/doc.go | 29 -
 .../objectstorage/v1/accounts/requests.go | 94 -
 .../objectstorage/v1/accounts/results.go | 112 -
 .../objectstorage/v1/accounts/urls.go | 11 -
 .../objectstorage/v1/containers/doc.go | 95 -
 .../objectstorage/v1/containers/requests.go | 250 -
 .../objectstorage/v1/containers/results.go | 301 -
 .../objectstorage/v1/containers/urls.go | 27 -
 .../openstack/objectstorage/v1/objects/doc.go | 110 -
 .../objectstorage/v1/objects/errors.go | 13 -
 .../objectstorage/v1/objects/requests.go | 527 -
 .../objectstorage/v1/objects/results.go | 534 -
 .../objectstorage/v1/objects/urls.go | 37 -
 .../openstack/utils/base_endpoint.go | 28 -
 .../openstack/utils/choose_version.go | 111 -
 .../gophercloud/pagination/http.go | 61 -
 .../gophercloud/pagination/linked.go | 92 -
 .../gophercloud/pagination/marker.go | 58 -
 .../gophercloud/pagination/pager.go | 251 -
 .../gophercloud/gophercloud/pagination/pkg.go | 4 -
 .../gophercloud/pagination/single.go | 33 -
 .../gophercloud/gophercloud/params.go | 493 -
 .../gophercloud/provider_client.go | 566 -
 .../gophercloud/gophercloud/results.go | 460 -
 .../gophercloud/gophercloud/service_client.go | 162 -
 .../gophercloud/gophercloud/util.go | 102 -
 .../github.com/hashicorp/consul/api/agent.go | 20 +-
 vendor/github.com/hashicorp/consul/api/api.go | 13 +-
 .../hashicorp/consul/api/config_entry.go | 27 +-
 .../consul/api/config_entry_discoverychain.go | 107 +-
 .../consul/api/config_entry_gateways.go | 16 +
 .../consul/api/config_entry_intentions.go | 80 +
 .../hashicorp/consul/api/connect_intention.go | 125 +-
 .../hashicorp/consul/api/discovery_chain.go | 3 +
 vendor/github.com/hashicorp/consul/api/go.mod | 4 +-
 vendor/github.com/hashicorp/consul/api/go.sum | 4 +-
 .../github.com/hashicorp/consul/api/lock.go | 6 +-
 .../hashicorp/consul/api/namespace.go | 6 +-
 .../consul/api/operator_autopilot.go | 133 +
 .../github.com/hashicorp/consul/api/status.go | 22 +-
 .../hashicorp/serf/coordinate/phantom.go | 4 +-
 vendor/github.com/ncw/swift/.travis.yml | 10 +-
 vendor/github.com/ncw/swift/README.md | 2 +
 vendor/github.com/ncw/swift/largeobjects.go | 2 +-
 vendor/github.com/ncw/swift/swift.go | 43 +-
 .../prometheus/process_collector.go | 21 +
 .../client_golang/prometheus/promhttp/http.go | 10 +-
 .../prometheus/promhttp/instrument_server.go | 91 +-
 .../client_golang/prometheus/registry.go | 2 +
 .../prometheus/discovery/dns/dns.go | 5 +-
 .../prometheus/pkg/timestamp/timestamp.go | 10 +-
 .../prometheus/prometheus/promql/engine.go | 445 +-
 .../prometheus/prometheus/promql/functions.go | 35 +-
 .../prometheus/promql/parser/ast.go | 59 +-
 .../promql/parser/generated_parser.y | 29 +-
 .../promql/parser/generated_parser.y.go | 1035 +-
 .../prometheus/promql/parser/lex.go | 3 +-
 .../prometheus/promql/parser/parse.go | 106 +-
 .../prometheus/promql/parser/printer.go | 42 +-
 .../prometheus/prometheus/promql/test.go | 184 +-
 .../prometheus/storage/interface.go | 2 +
 .../storage/remote/queue_manager.go | 4 +-
 .../prometheus/prometheus/tsdb/README.md | 3 +-
 .../prometheus/prometheus/tsdb/blockwriter.go | 5 +-
 .../prometheus/tsdb/chunks/head_chunks.go | 2 +-
 .../prometheus/prometheus/tsdb/compact.go | 7 +-
 .../prometheus/prometheus/tsdb/db.go | 24 +-
 .../prometheus/prometheus/tsdb/head.go | 82 +-
 .../prometheus/tsdb/index/postings.go | 4 +
 .../prometheus/prometheus/web/api/v1/api.go | 229 +-
 .../thanos-io/thanos/pkg/block/block.go | 43 +-
 .../thanos-io/thanos/pkg/block/fetcher.go | 3 +
 .../pkg/block/indexheader/binary_reader.go | 48 +-
 .../block/indexheader/lazy_binary_reader.go | 51 +-
 .../pkg/block/indexheader/reader_pool.go | 20 +-
 .../thanos-io/thanos/pkg/compact/compact.go | 2 +-
 .../pkg/compact/downsample/downsample.go | 3 +-
 .../pkg/discovery/dns/godns/resolver.go | 25 +
 .../pkg/discovery/dns/miekgdns/lookup.go | 4 +-
 .../pkg/discovery/dns/miekgdns/resolver.go | 4 +
 .../thanos/pkg/discovery/dns/provider.go | 7 +-
 .../thanos/pkg/discovery/dns/resolver.go | 17 +-
 .../{multierror.go.go => multierror.go} | 0
 .../thanos-io/thanos/pkg/objstore/s3/s3.go | 20 +-
 .../thanos/pkg/objstore/swift/swift.go | 463 +-
 .../thanos/pkg/promclient/promclient.go | 21 +-
 .../thanos-io/thanos/pkg/store/bucket.go | 261 +-
 .../thanos/pkg/store/labelpb/label.go | 63 +-
 .../thanos-io/thanos/pkg/store/limiter.go | 17 +
 .../thanos-io/thanos/pkg/store/local.go | 9 +-
 .../thanos-io/thanos/pkg/store/prometheus.go | 93 +-
 .../thanos-io/thanos/pkg/store/proxy.go | 134 +-
 .../thanos/pkg/store/storepb/custom.go | 9 +-
 .../thanos/pkg/store/storepb/inprocess.go | 97 +
 .../pkg/store/storepb/prompb/types.pb.go | 1 +
 .../pkg/store/storepb/prompb/types.proto | 1 +
 .../thanos-io/thanos/pkg/store/tsdb.go | 27 +-
 .../weaveworks/common/httpgrpc/README.md | 2 +-
 .../common/httpgrpc/server/server.go | 12 +-
 .../weaveworks/common/tracing/tracing.go | 4 +-
 vendor/go.opencensus.io/go.mod | 2 +-
 .../plugin/ocgrpc/client_metrics.go | 2 +
 .../plugin/ocgrpc/server_metrics.go | 2 +
 .../stats/view/aggregation.go | 18 +-
 .../stats/view/aggregation_data.go | 55 +-
 .../go.opencensus.io/stats/view/collector.go | 2 +-
 .../stats/view/view_to_metric.go | 13 +-
 vendor/go.opencensus.io/stats/view/worker.go | 36 +-
 vendor/go.opencensus.io/trace/trace.go | 35 +-
 .../x/crypto/argon2/blamka_amd64.go | 2 +-
 .../golang.org/x/crypto/argon2/blamka_amd64.s | 2 +-
 .../golang.org/x/crypto/argon2/blamka_ref.go | 2 +-
 .../x/crypto/blake2b/blake2bAVX2_amd64.go | 2 +-
 .../x/crypto/blake2b/blake2bAVX2_amd64.s | 2 +-
 .../x/crypto/blake2b/blake2b_amd64.go | 2 +-
 .../x/crypto/blake2b/blake2b_amd64.s | 2 +-
 .../x/crypto/blake2b/blake2b_ref.go | 2 +-
 vendor/golang.org/x/net/http2/transport.go | 4 +-
 .../internal/socket/zsys_openbsd_mips64.go | 50 +
 vendor/golang.org/x/net/publicsuffix/table.go | 19976 ++++++++--------
 vendor/golang.org/x/oauth2/README.md | 10 +-
 vendor/golang.org/x/sys/unix/mkerrors.sh | 1 +
 vendor/golang.org/x/sys/unix/ptrace_darwin.go | 11 +
 vendor/golang.org/x/sys/unix/ptrace_ios.go | 11 +
 .../x/sys/unix/syscall_darwin.1_13.go | 1 -
 .../x/sys/unix/syscall_darwin_386.go | 2 +-
 .../x/sys/unix/syscall_darwin_amd64.go | 2 +-
 .../x/sys/unix/syscall_darwin_arm.go | 2 +-
 .../x/sys/unix/syscall_darwin_arm64.go | 2 +-
 vendor/golang.org/x/sys/unix/syscall_linux.go | 87 +-
 vendor/golang.org/x/sys/unix/timestruct.go | 26 +-
 vendor/golang.org/x/sys/unix/zerrors_linux.go | 24 +-
 .../x/sys/unix/zerrors_linux_386.go | 2 +-
 .../x/sys/unix/zerrors_linux_amd64.go | 2 +-
 .../x/sys/unix/zerrors_linux_arm.go | 2 +-
 .../x/sys/unix/zerrors_linux_arm64.go | 4 +-
 .../x/sys/unix/zerrors_linux_mips.go | 2 +-
 .../x/sys/unix/zerrors_linux_mips64.go | 2 +-
 .../x/sys/unix/zerrors_linux_mips64le.go | 2 +-
 .../x/sys/unix/zerrors_linux_mipsle.go | 2 +-
 .../x/sys/unix/zerrors_linux_ppc64.go | 2 +-
 .../x/sys/unix/zerrors_linux_ppc64le.go | 2 +-
 .../x/sys/unix/zerrors_linux_riscv64.go | 2 +-
 .../x/sys/unix/zerrors_linux_s390x.go | 2 +-
 .../x/sys/unix/zerrors_linux_sparc64.go | 2 +-
 .../x/sys/unix/zsyscall_darwin_386.1_13.go | 2 -
 .../x/sys/unix/zsyscall_darwin_386.go | 144 +-
 .../x/sys/unix/zsyscall_darwin_amd64.1_13.go | 2 -
 .../x/sys/unix/zsyscall_darwin_amd64.go | 144 +-
 .../x/sys/unix/zsyscall_darwin_arm.1_13.go | 2 -
 .../x/sys/unix/zsyscall_darwin_arm.go | 141 -
 .../x/sys/unix/zsyscall_darwin_arm64.1_13.go | 2 -
 .../x/sys/unix/zsyscall_darwin_arm64.go | 144 +-
 .../x/sys/unix/zsysnum_linux_386.go | 1 +
 .../x/sys/unix/zsysnum_linux_amd64.go | 1 +
 .../x/sys/unix/zsysnum_linux_arm.go | 1 +
 .../x/sys/unix/zsysnum_linux_arm64.go | 1 +
 .../x/sys/unix/zsysnum_linux_mips.go | 1 +
 .../x/sys/unix/zsysnum_linux_mips64.go | 1 +
 .../x/sys/unix/zsysnum_linux_mips64le.go | 1 +
 .../x/sys/unix/zsysnum_linux_mipsle.go | 1 +
 .../x/sys/unix/zsysnum_linux_ppc64.go | 1 +
 .../x/sys/unix/zsysnum_linux_ppc64le.go | 1 +
 .../x/sys/unix/zsysnum_linux_riscv64.go | 1 +
 .../x/sys/unix/zsysnum_linux_s390x.go | 1 +
 .../x/sys/unix/zsysnum_linux_sparc64.go | 1 +
 .../golang.org/x/sys/unix/ztypes_aix_ppc.go | 1 +
 .../golang.org/x/sys/unix/ztypes_aix_ppc64.go | 1 +
 .../x/sys/unix/ztypes_darwin_386.go | 1 +
 .../x/sys/unix/ztypes_darwin_amd64.go | 1 +
 .../x/sys/unix/ztypes_darwin_arm.go | 1 +
 .../x/sys/unix/ztypes_darwin_arm64.go | 1 +
 .../x/sys/unix/ztypes_dragonfly_amd64.go | 1 +
 .../x/sys/unix/ztypes_freebsd_386.go | 1 +
 .../x/sys/unix/ztypes_freebsd_amd64.go | 1 +
 .../x/sys/unix/ztypes_freebsd_arm.go | 1 +
 .../x/sys/unix/ztypes_freebsd_arm64.go | 1 +
 vendor/golang.org/x/sys/unix/ztypes_linux.go | 1033 +-
 .../golang.org/x/sys/unix/ztypes_linux_386.go | 2 +-
 .../x/sys/unix/ztypes_linux_amd64.go | 2 +-
 .../golang.org/x/sys/unix/ztypes_linux_arm.go | 2 +-
 .../x/sys/unix/ztypes_linux_arm64.go | 2 +-
 .../x/sys/unix/ztypes_linux_mips.go | 2 +-
 .../x/sys/unix/ztypes_linux_mips64.go | 2 +-
 .../x/sys/unix/ztypes_linux_mips64le.go | 2 +-
 .../x/sys/unix/ztypes_linux_mipsle.go | 2 +-
 .../x/sys/unix/ztypes_linux_ppc64.go | 2 +-
 .../x/sys/unix/ztypes_linux_ppc64le.go | 2 +-
 .../x/sys/unix/ztypes_linux_riscv64.go | 2 +-
 .../x/sys/unix/ztypes_linux_s390x.go | 2 +-
 .../x/sys/unix/ztypes_linux_sparc64.go | 2 +-
 .../x/sys/unix/ztypes_netbsd_386.go | 1 +
 .../x/sys/unix/ztypes_netbsd_amd64.go | 1 +
 .../x/sys/unix/ztypes_netbsd_arm.go | 1 +
 .../x/sys/unix/ztypes_netbsd_arm64.go | 1 +
 .../x/sys/unix/ztypes_openbsd_386.go | 1 +
 .../x/sys/unix/ztypes_openbsd_amd64.go | 1 +
 .../x/sys/unix/ztypes_openbsd_arm.go | 1 +
 .../x/sys/unix/ztypes_openbsd_arm64.go | 1 +
 .../x/sys/unix/ztypes_openbsd_mips64.go | 1 +
 .../x/sys/unix/ztypes_solaris_amd64.go | 1 +
 .../golang.org/x/sys/windows/dll_windows.go | 1 -
 .../x/sys/windows/security_windows.go | 11 +
 vendor/golang.org/x/sys/windows/service.go | 6 +
 .../x/sys/windows/syscall_windows.go | 5 +-
 .../golang.org/x/sys/windows/types_windows.go | 37 +
 .../x/sys/windows/zsyscall_windows.go | 80 +-
 .../go/analysis/passes/structtag/structtag.go | 94 +-
 vendor/golang.org/x/tools/go/ssa/mode.go | 5 +
 .../x/tools/internal/event/core/event.go | 2 +-
 .../x/tools/internal/imports/mod.go | 6 +-
 .../x/tools/internal/typesinternal/types.go | 17 +
 .../v1/cloudresourcemanager-api.json | 161 +-
 .../v1/cloudresourcemanager-gen.go | 275 +-
 .../google.golang.org/api/internal/creds.go | 23 +-
 .../api/internal/settings.go | 11 +
 .../option/internaloption/internaloption.go | 29 +
 .../api/storage/v1/storage-api.json | 9 +-
 .../api/storage/v1/storage-gen.go | 110 +-
 .../api/transport/cert/default_cert.go | 41 +-
 .../admin/v2/bigtable_instance_admin.pb.go | 505 +-
 .../admin/v2/bigtable_table_admin.pb.go | 2118 +-
 vendor/gopkg.in/yaml.v2/.travis.yml | 1 +
 vendor/gopkg.in/yaml.v2/apic.go | 6 +-
 vendor/gopkg.in/yaml.v2/go.mod | 8 +-
 vendor/gopkg.in/yaml.v2/yaml.go | 14 +-
 vendor/modules.txt | 81 +-
 548 files changed, 28885 insertions(+), 30585 deletions(-)
 create mode 100644 vendor/cloud.google.com/go/testing.md
 create mode 100644 vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go
 rename vendor/github.com/aws/aws-sdk-go/aws/session/{cabundle_transport.go => custom_transport_go1.12.go} (88%)
 rename vendor/github.com/aws/aws-sdk-go/aws/session/{cabundle_transport_1_5.go => custom_transport_go1.5.go} (88%)
 rename vendor/github.com/aws/aws-sdk-go/aws/session/{cabundle_transport_1_6.go => custom_transport_go1.6.go} (90%)
 delete mode 100644 vendor/github.com/blang/semver/.travis.yml
 delete mode 100644 vendor/github.com/blang/semver/LICENSE
 delete mode 100644 vendor/github.com/blang/semver/README.md
 delete mode 100644 vendor/github.com/blang/semver/json.go
 delete mode 100644 vendor/github.com/blang/semver/package.json
 delete mode 100644 vendor/github.com/blang/semver/range.go
 delete mode 100644 vendor/github.com/blang/semver/semver.go
 delete mode 100644 vendor/github.com/blang/semver/sort.go
 delete mode 100644 vendor/github.com/blang/semver/sql.go
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/chunk/aws/sse_config.go
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/blocks_finder_bucket_index.go
 rename vendor/github.com/cortexproject/cortex/pkg/querier/{blocks_scanner.go => blocks_finder_bucket_scan.go} (85%)
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/merge_queryable.go
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/tenant_federation.go
 delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_replication_strategy.go
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/bucket/bucket_util.go
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/loader.go
 delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/reader.go
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/storage.go
 rename vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/{writer.go => updater.go} (65%)
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_index_metadata_fetcher.go
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_filters.go
 delete mode 100644 vendor/github.com/cortexproject/cortex/pkg/storegateway/replication_strategy.go
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/active_user.go
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/config.go
 rename vendor/github.com/cortexproject/cortex/pkg/util/{ => log}/experimental.go (97%)
 rename vendor/github.com/cortexproject/cortex/pkg/util/{ => log}/log.go (65%)
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/log/wrappers.go
 rename vendor/github.com/cortexproject/cortex/pkg/util/{ => math}/math.go (97%)
 create mode 100644 vendor/github.com/cortexproject/cortex/pkg/util/yaml.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/.gitignore
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/.travis.yml
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/.zuul.yaml
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/CHANGELOG.md
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/LICENSE
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/README.md
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/auth_options.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/auth_result.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/doc.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/endpoint_search.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/errors.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/go.mod
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/go.sum
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/client.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/doc.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/errors.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/doc.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/requests.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/urls.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/doc.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/requests.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/results.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/urls.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/http.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/linked.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/marker.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/pager.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/pkg.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/pagination/single.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/params.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/provider_client.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/results.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/service_client.go
 delete mode 100644 vendor/github.com/gophercloud/gophercloud/util.go
 create mode 100644 vendor/github.com/hashicorp/consul/api/config_entry_intentions.go
 create mode 100644 vendor/github.com/thanos-io/thanos/pkg/discovery/dns/godns/resolver.go
 rename vendor/github.com/thanos-io/thanos/pkg/errutil/{multierror.go.go => multierror.go} (100%)
 create mode 100644 vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go
 create mode 100644 vendor/golang.org/x/net/internal/socket/zsys_openbsd_mips64.go
 create mode 100644 vendor/golang.org/x/sys/unix/ptrace_darwin.go
 create mode 100644 vendor/golang.org/x/sys/unix/ptrace_ios.go

diff --git a/cmd/tempo/app/app.go b/cmd/tempo/app/app.go
index 5038b5d8178..f928f3e1105 100644
--- a/cmd/tempo/app/app.go
+++ b/cmd/tempo/app/app.go
@@ -13,6 +13,7 @@ import (
 	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/flagext"
 	"github.com/cortexproject/cortex/pkg/util/grpc/healthcheck"
+	"github.com/cortexproject/cortex/pkg/util/log"
 	"github.com/cortexproject/cortex/pkg/util/modules"
 	"github.com/cortexproject/cortex/pkg/util/services"
 	"github.com/go-kit/kit/log/level"
@@ -92,26 +93,26 @@ func (c *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) {
 
 // CheckConfig checks if config values are suspect.
 func (c *Config) CheckConfig() {
 	if c.Ingester.CompleteBlockTimeout < c.StorageConfig.Trace.BlocklistPoll {
-		level.Warn(util.Logger).Log("msg", "ingester.complete_block_timeout < storage.trace.blocklist_poll",
+		level.Warn(log.Logger).Log("msg", "ingester.complete_block_timeout < storage.trace.blocklist_poll",
 			"explan", "You may receive 404s between the time the ingesters have flushed a trace and the querier is aware of the new block")
 	}
 
 	if c.Compactor.Compactor.BlockRetention < c.StorageConfig.Trace.BlocklistPoll {
-		level.Warn(util.Logger).Log("msg", "compactor.compaction.compacted_block_timeout < storage.trace.blocklist_poll",
+		level.Warn(log.Logger).Log("msg", "compactor.compaction.compacted_block_timeout < storage.trace.blocklist_poll",
 			"explan", "Queriers and Compactors may attempt to read a block that no longer exists")
 	}
 
 	if c.Compactor.Compactor.RetentionConcurrency == 0 {
-		level.Warn(util.Logger).Log("msg", "c.Compactor.Compactor.RetentionConcurrency must be greater than zero. Using default.", "default", tempodb.DefaultRetentionConcurrency)
+		level.Warn(log.Logger).Log("msg", "c.Compactor.Compactor.RetentionConcurrency must be greater than zero. Using default.", "default", tempodb.DefaultRetentionConcurrency)
 	}
 
 	if c.StorageConfig.Trace.Backend == "s3" && c.Compactor.Compactor.FlushSizeBytes < 5242880 {
-		level.Warn(util.Logger).Log("msg", "c.Compactor.Compactor.FlushSizeBytes < 5242880",
+		level.Warn(log.Logger).Log("msg", "c.Compactor.Compactor.FlushSizeBytes < 5242880",
 			"explan", "Compaction flush size should be 5MB or higher for S3 backend")
 	}
 
 	if c.StorageConfig.Trace.BlocklistPollConcurrency == 0 {
-		level.Warn(util.Logger).Log("msg", "c.StorageConfig.Trace.BlocklistPollConcurrency must be greater than zero. Using default.", "default", tempodb.DefaultBlocklistPollConcurrency)
+		level.Warn(log.Logger).Log("msg", "c.StorageConfig.Trace.BlocklistPollConcurrency must be greater than zero. Using default.", "default", tempodb.DefaultBlocklistPollConcurrency)
 	}
 }
@@ -193,7 +194,7 @@ func (t *App) setupAuthMiddleware() {
 
 // Run starts, and blocks until a signal is received.
 func (t *App) Run() error {
 	if !t.moduleManager.IsUserVisibleModule(t.cfg.Target) {
-		level.Warn(util.Logger).Log("msg", "selected target is an internal module, is this intended?", "target", t.cfg.Target)
+		level.Warn(log.Logger).Log("msg", "selected target is an internal module, is this intended?", "target", t.cfg.Target)
 	}
 
 	serviceMap, err := t.moduleManager.InitModuleServices(t.cfg.Target)
@@ -218,8 +219,8 @@ func (t *App) Run() error {
 	grpc_health_v1.RegisterHealthServer(t.server.GRPC, healthcheck.New(sm))
 
 	// Let's listen for events from this manager, and log them.
-	healthy := func() { level.Info(util.Logger).Log("msg", "Tempo started") }
-	stopped := func() { level.Info(util.Logger).Log("msg", "Tempo stopped") }
+	healthy := func() { level.Info(log.Logger).Log("msg", "Tempo started") }
+	stopped := func() { level.Info(log.Logger).Log("msg", "Tempo stopped") }
 	serviceFailed := func(service services.Service) {
 		// if any service fails, stop everything
 		sm.StopAsync()
@@ -228,15 +229,15 @@ for m, s := range serviceMap {
 			if s == service {
 				if service.FailureCase() == util.ErrStopProcess {
-					level.Info(util.Logger).Log("msg", "received stop signal via return error", "module", m, "err", service.FailureCase())
+					level.Info(log.Logger).Log("msg", "received stop signal via return error", "module", m, "err", service.FailureCase())
 				} else {
-					level.Error(util.Logger).Log("msg", "module failed", "module", m, "err", service.FailureCase())
+					level.Error(log.Logger).Log("msg", "module failed", "module", m, "err", service.FailureCase())
 				}
 				return
 			}
 		}
 
-		level.Error(util.Logger).Log("msg", "module failed", "module", "unknown", "err", service.FailureCase())
+		level.Error(log.Logger).Log("msg", "module failed", "module", "unknown", "err", service.FailureCase())
 	}
 
 	sm.AddListener(services.NewManagerListener(healthy, stopped, serviceFailed))
@@ -268,7 +269,7 @@ func (t *App) configHandler() http.HandlerFunc {
 		w.Header().Set("Content-Type", "text/yaml")
 		w.WriteHeader(http.StatusOK)
 		if _, err := w.Write(out); err != nil {
-			level.Error(util.Logger).Log("msg", "error writing response", "err", err)
+			level.Error(log.Logger).Log("msg", "error writing response", "err", err)
 		}
 	}
diff --git a/cmd/tempo/app/modules.go b/cmd/tempo/app/modules.go
index 643d76b6a8c..125f55e1360 100644
--- a/cmd/tempo/app/modules.go
+++ b/cmd/tempo/app/modules.go
@@ -12,7 +12,7 @@ import (
 	"github.com/cortexproject/cortex/pkg/ring"
 	"github.com/cortexproject/cortex/pkg/ring/kv/codec"
 	"github.com/cortexproject/cortex/pkg/ring/kv/memberlist"
-	"github.com/cortexproject/cortex/pkg/util"
+	"github.com/cortexproject/cortex/pkg/util/log"
 	"github.com/cortexproject/cortex/pkg/util/modules"
 	"github.com/cortexproject/cortex/pkg/util/services"
 	"github.com/go-kit/kit/log/level"
@@ -137,7 +137,7 @@ func (t *App) initQuerier() (services.Service, error) {
 	// if we're in single binary mode with no worker address specified, register default endpoint
 	if t.cfg.Querier.Worker.FrontendAddress == "" {
 		t.cfg.Querier.Worker.FrontendAddress = fmt.Sprintf("127.0.0.1:%d", t.cfg.Server.GRPCListenPort)
-		level.Warn(util.Logger).Log("msg", "Worker address is empty in single binary mode. Attempting automatic worker configuration. If queries are unresponsive consider configuring the worker explicitly.", "address", t.cfg.Querier.Worker.FrontendAddress)
+		level.Warn(log.Logger).Log("msg", "Worker address is empty in single binary mode. Attempting automatic worker configuration. If queries are unresponsive consider configuring the worker explicitly.", "address", t.cfg.Querier.Worker.FrontendAddress)
 	}
 }
@@ -159,20 +159,20 @@ func (t *App) initQueryFrontend() (services.Service, error) {
 	var err error
-	cortexTripper, v1, _, err := cortex_frontend.InitFrontend(t.cfg.Frontend.Config, frontend.CortexNoQuerierLimits{}, 0, util.Logger, prometheus.DefaultRegisterer)
+	cortexTripper, v1, _, err := cortex_frontend.InitFrontend(t.cfg.Frontend.Config, frontend.CortexNoQuerierLimits{}, 0, log.Logger, prometheus.DefaultRegisterer)
 	if err != nil {
 		return nil, err
 	}
 	t.frontend = v1
 
 	// custom tripperware that splits requests
-	shardingTripperWare, err := frontend.NewTripperware(t.cfg.Frontend, util.Logger, prometheus.DefaultRegisterer)
+	shardingTripperWare, err := frontend.NewTripperware(t.cfg.Frontend, log.Logger, prometheus.DefaultRegisterer)
 	if err != nil {
 		return nil, err
 	}
 	shardingTripper := shardingTripperWare(cortexTripper)
 
-	cortexHandler := cortex_transport.NewHandler(t.cfg.Frontend.Config.Handler, shardingTripper, util.Logger, prometheus.DefaultRegisterer)
+	cortexHandler := cortex_transport.NewHandler(t.cfg.Frontend.Config.Handler, shardingTripper, log.Logger, prometheus.DefaultRegisterer)
 
 	tracesHandler := middleware.Merge(
 		t.httpAuthMiddleware,
@@ -203,7 +203,7 @@ func (t *App) initCompactor() (services.Service, error) {
 }
 
 func (t *App) initStore() (services.Service, error) {
-	store, err := tempo_storage.NewStore(t.cfg.StorageConfig, util.Logger)
+	store, err := tempo_storage.NewStore(t.cfg.StorageConfig, log.Logger)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create store %w", err)
 	}
@@ -226,7 +226,7 @@ func (t *App) initMemberlistKV() (services.Service, error) {
 	// todo: do we still need this? does the package do this by default now?
 	t.cfg.MemberlistKV.NodeName = hostname + "-" + uuid.New().String()
 
-	t.memberlistKV = memberlist.NewKVInitService(&t.cfg.MemberlistKV, util.Logger)
+	t.memberlistKV = memberlist.NewKVInitService(&t.cfg.MemberlistKV, log.Logger)
 
 	t.cfg.Ingester.LifecyclerConfig.RingConfig.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV
 	t.cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV
diff --git a/cmd/tempo/main.go b/cmd/tempo/main.go
index 277d994be65..84e0fbb829f 100644
--- a/cmd/tempo/main.go
+++ b/cmd/tempo/main.go
@@ -19,8 +19,8 @@ import (
 	"github.com/weaveworks/common/logging"
 	"github.com/weaveworks/common/tracing"
 
-	"github.com/cortexproject/cortex/pkg/util"
 	"github.com/cortexproject/cortex/pkg/util/flagext"
+	"github.com/cortexproject/cortex/pkg/util/log"
 )
 
 const appName = "tempo"
@@ -55,20 +55,20 @@ func main() {
 
 	// Init the logger which will honor the log level set in config.Server
 	if reflect.DeepEqual(&config.Server.LogLevel, &logging.Level{}) {
-		level.Error(util.Logger).Log("msg", "invalid log level")
+		level.Error(log.Logger).Log("msg", "invalid log level")
 		os.Exit(1)
 	}
-	util.InitLogger(&config.Server)
+	log.InitLogger(&config.Server)
 
 	// Setting the environment variable JAEGER_AGENT_HOST enables tracing
 	trace, err := tracing.NewFromEnv(fmt.Sprintf("%s-%s", appName, config.Target))
 	if err != nil {
-		level.Error(util.Logger).Log("msg", "error initialising tracer", "err", err)
+		level.Error(log.Logger).Log("msg", "error initialising tracer", "err", err)
 		os.Exit(1)
 	}
 	defer func() {
 		if err := trace.Close(); err != nil {
-			level.Error(util.Logger).Log("msg", "error closing tracing", "err", err)
+			level.Error(log.Logger).Log("msg", "error closing tracing", "err", err)
 			os.Exit(1)
 		}
 	}()
@@ -82,19 +82,19 @@ func main() {
 
 	// Start Tempo
 	t, err := app.New(*config)
 	if err != nil {
-		level.Error(util.Logger).Log("msg", "error initialising Tempo", "err", err)
+		level.Error(log.Logger).Log("msg", "error initialising Tempo", "err", err)
 		os.Exit(1)
 	}
 
-	level.Info(util.Logger).Log("msg", "Starting Tempo", "version", version.Info())
+	level.Info(log.Logger).Log("msg", "Starting Tempo", "version", version.Info())
 
 	if err := t.Run(); err != nil {
-		level.Error(util.Logger).Log("msg", "error running Tempo", "err", err)
+		level.Error(log.Logger).Log("msg", "error running Tempo", "err", err)
 		os.Exit(1)
 	}
 
 	runtime.KeepAlive(ballast)
-	level.Info(util.Logger).Log("msg", "Tempo running")
+	level.Info(log.Logger).Log("msg", "Tempo running")
 }
 
 func loadConfig() (*app.Config, error) {
diff --git a/go.mod b/go.mod
index e325f0c0da3..fab43cb8965 100644
--- a/go.mod
+++ b/go.mod
@@ -7,16 +7,17 @@ require (
 	contrib.go.opencensus.io/exporter/prometheus v0.2.0
 	github.com/Azure/azure-storage-blob-go v0.8.0
 	github.com/alecthomas/kong v0.2.11
-	github.com/cortexproject/cortex v1.6.0
+	github.com/cortexproject/cortex v1.6.1-0.20210205171041-527f9b58b93c
 	github.com/go-kit/kit v0.10.0
 	github.com/gogo/protobuf v1.3.1
 	github.com/gogo/status v1.0.3
 	github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
 	github.com/golang/protobuf v1.4.3
-	github.com/golang/snappy v0.0.2
-	github.com/google/uuid v1.1.1
+	github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3
+	github.com/google/uuid v1.1.2
 	github.com/gorilla/mux v1.7.4
 	github.com/grafana/loki v1.3.0
+	github.com/grpc-ecosystem/grpc-gateway v1.16.0
 	github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645
 	github.com/hashicorp/go-hclog v0.14.0
 	github.com/jaegertracing/jaeger v1.18.2-0.20200707061226-97d2319ff2be
@@ -28,29 +29,28 @@ require (
 	github.com/opentracing/opentracing-go v1.2.0
 	github.com/pierrec/lz4/v4 v4.1.3
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/client_golang v1.8.0
+	github.com/prometheus/client_golang v1.9.0
 	github.com/prometheus/client_model v0.2.0
 	github.com/prometheus/common v0.15.0
-	github.com/prometheus/prometheus v1.8.2-0.20201119181812-c8f810083d3f
+	github.com/prometheus/prometheus v1.8.2-0.20210124145330-b5dfa2414b9e
 	github.com/prometheus/prometheus/discovery/config v0.0.0-00010101000000-000000000000 // indirect
 	github.com/sirupsen/logrus v1.6.0
 	github.com/spf13/viper v1.7.1
 	github.com/stretchr/testify v1.6.1
 	github.com/uber-go/atomic v1.4.0
 	github.com/uber/jaeger-client-go v2.25.0+incompatible
-	github.com/weaveworks/common v0.0.0-20201119133501-0619918236ec
+	github.com/weaveworks/common v0.0.0-20210112142934-23c8d7fa6120
 	github.com/willf/bitset v1.1.10 // indirect
 	github.com/willf/bloom v2.0.3+incompatible
-	go.opencensus.io v0.22.4
+	go.opencensus.io v0.22.5
 	go.opentelemetry.io/collector v0.6.1
 	go.uber.org/atomic v1.7.0
 	go.uber.org/goleak v1.1.10
 	go.uber.org/zap v1.15.0
-	golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e
-	google.golang.org/api v0.35.0
-	google.golang.org/genproto v0.0.0-20201028140639-c77dae4b0522 // indirect
-	google.golang.org/grpc v1.33.1
-	gopkg.in/yaml.v2 v2.3.0
+	golang.org/x/time v0.0.0-20201208040808-7e3f01d25324
+	google.golang.org/api v0.36.0
+	google.golang.org/grpc v1.33.2
+	gopkg.in/yaml.v2 v2.4.0
 	gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
 )
@@ -70,6 +70,7 @@ replace (
 	github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab
 	github.com/opentracing-contrib/go-grpc => github.com/pracucci/go-grpc v0.0.0-20201022134131-ef559b8db645
 	github.com/satori/go.uuid => github.com/satori/go.uuid v1.2.0
+	k8s.io/api => k8s.io/api v0.19.4
 	k8s.io/client-go => k8s.io/client-go v0.19.2
 )
diff --git a/go.sum b/go.sum
index 79568c41d29..e09a2c670e2 100644
--- a/go.sum
+++ b/go.sum
@@ -21,6 +21,8 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY
 cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
 cloud.google.com/go v0.66.0 h1:DZeAkuQGQqnm9Xv36SbMJEU8aFBz4wL04UpMWPWwjzg=
 cloud.google.com/go v0.66.0/go.mod h1:dgqGAjKCDxyhGTtC9dAREQGUJpkceNm1yt590Qno0Ko=
+cloud.google.com/go v0.72.0 h1:eWRCuwubtDrCJG0oSUMgnsbD4CmPFQF2ei4OFbXvwww=
+cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
 cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
 cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
 cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU=
@@ -72,6 +74,8 @@ github.com/Azure/azure-sdk-for-go v45.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9mo
 github.com/Azure/azure-sdk-for-go v46.4.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v48.2.0+incompatible h1:+t2P1j1r5N6lYgPiiz7ZbEVZFkWjVe9WhHbMm0gg8hw=
 github.com/Azure/azure-sdk-for-go v48.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v49.2.0+incompatible h1:23a1GeBzTLeT53StH9NDJyCMhxCH3awTZaw9ZYBcq78=
+github.com/Azure/azure-sdk-for-go v49.2.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4=
 github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o=
 github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
@@ -92,6 +96,8 @@ github.com/Azure/go-autorest/autorest v0.11.4/go.mod h1:JFgpikqFJ/MleTTxwepExTKn
 github.com/Azure/go-autorest/autorest v0.11.10/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
 github.com/Azure/go-autorest/autorest v0.11.11 h1:k/wzH9pA3hrtFNsEhJ5SqPEs75W3bzS8VOYA/fJ0j1k=
 github.com/Azure/go-autorest/autorest v0.11.11/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
+github.com/Azure/go-autorest/autorest v0.11.15 h1:S5SDFpmgoVyvMEOcULyEDlYFrdPmu6Wl0Ic+shkEwzg=
+github.com/Azure/go-autorest/autorest v0.11.15/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
 github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0=
 github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc=
 github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503 h1:Hxqlh1uAA8aGpa1dFhDNhll7U/rkWtG8ZItFvRMr7l0=
@@ -103,6 +109,8 @@ github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQW
 github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE=
 github.com/Azure/go-autorest/autorest/adal v0.9.5 h1:Y3bBUV4rTuxenJJs41HU3qmqsb+auo+a3Lz+PlJPpL0=
 github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
+github.com/Azure/go-autorest/autorest/adal v0.9.10 h1:r6fZHMaHD8B6LDCn0o5vyBFHIHrM6Ywwx7mb49lPItI=
+github.com/Azure/go-autorest/autorest/adal v0.9.10/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
 github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA=
 github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM=
 github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g=
@@ -192,6 +200,8 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
 github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=
 github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
+github.com/alecthomas/units v0.0.0-20201120081800-1786d5ef83d4 h1:EBTWhcAX7rNQ80RLwLCpHZBBrJuzallFHnF+yMXo928=
+github.com/alecthomas/units v0.0.0-20201120081800-1786d5ef83d4/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE=
 github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk=
 github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc=
 github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI=
@@ -213,6 +223,8 @@ github.com/armon/go-metrics v0.3.0 h1:B7AQgHi8QSEi4uHu7Sbsga+IJDU+CENgjxoo81vDUq
 github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs=
 github.com/armon/go-metrics v0.3.3 h1:a9F4rlj7EWWrbj7BYw8J8+x+ZZkJeqzNyRk8hdPF+ro=
 github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
+github.com/armon/go-metrics v0.3.6 h1:x/tmtOF9cDBoXH7XoAGOz2qqm1DknFD1590XmD/DUJ8=
+github.com/armon/go-metrics v0.3.6/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc=
 github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
 github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
@@ -243,6 +255,8 @@ github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/
 github.com/aws/aws-sdk-go v1.35.5/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
 github.com/aws/aws-sdk-go v1.35.31 h1:6tlaYq4Q311qfhft/fIaND33XI27aW3zIdictcHxifE=
 github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
+github.com/aws/aws-sdk-go v1.36.15 h1:nGqgPlXegCKPZOKXvWnYCLvLPJPRoSOHHn9d0N0DG7Y=
+github.com/aws/aws-sdk-go v1.36.15/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
 github.com/aws/aws-xray-sdk-go v0.9.4/go.mod h1:XtMKdBQfpVut+tJEwI7+dJFRxxRdxHDyVNp2tHXRq04=
 github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f h1:ZNv7On9kyUzm7fvRZumSyy/IUiSC7AzL0I1jKKtwooA=
@@ -337,13 +351,16 @@ github.com/cortexproject/cortex v1.2.1-0.20200805064754-d8edc95e2c91/go.mod h1:P
 github.com/cortexproject/cortex v1.3.1-0.20200923145333-8587ea61fe17/go.mod h1:dJ9gpW7dzQ7z09cKtNN9PfebumgyO4dtNdFQ6eQEed0=
 github.com/cortexproject/cortex v1.4.1-0.20201030080541-83ad6df2abea/go.mod h1:kXo5F3jlF7Ky3+I31jt/bXTzOlQjl2X/vGDpy0RY1gU=
 github.com/cortexproject/cortex v1.5.1-0.20201111110551-ba512881b076/go.mod h1:zFBGVsvRBfVp6ARXZ7pmiLaGlbjda5ZnA4Y6qSJyrQg=
-github.com/cortexproject/cortex v1.6.0 h1:/NOdjt80poIPchA9rItwYGeNt2ddxPqMNrCpnRP2iUg=
-github.com/cortexproject/cortex v1.6.0/go.mod h1:QSi2ZZeKG3OoZ1+mJSthJK5fnMYAxPUnBEzt0c8Mk1Q=
+github.com/cortexproject/cortex v1.6.1-0.20210108144208-6c2dab103f20/go.mod h1:fOsaeeFSyWrjd9nFJO8KVUpsikcxnYsjEzQyjURBoQk=
+github.com/cortexproject/cortex v1.6.1-0.20210205171041-527f9b58b93c h1:ac1QnsYGhGO1ou0vsHJljD29FdtAhuMDO+MUPxYVtmQ=
+github.com/cortexproject/cortex v1.6.1-0.20210205171041-527f9b58b93c/go.mod h1:hQ45oW8W7SKNBv4bkl1960kWyslLDbL2IWuzCQBCVGY=
 github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
+github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/crossdock/crossdock-go v0.0.0-20160816171116-049aabb0122b/go.mod h1:v9FBN7gdVTpiD/+LZ7Po0UKvROyT87uLVxTHVky/dlQ=
 github.com/cznic/b v0.0.0-20180115125044-35e9bbe41f07/go.mod h1:URriBxXwVq5ijiJ12C7iIZqlA69nTlI+LgI6/pwftG8=
 github.com/cznic/fileutil v0.0.0-20180108211300-6a051e75936f/go.mod h1:8S58EK26zhXSxzv7NQFpnliaOQsmDUxvoQO3rt154Vg=
@@ -387,6 +404,8 @@ github.com/digitalocean/godo v1.42.1/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2x
 github.com/digitalocean/godo v1.46.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
 github.com/digitalocean/godo v1.52.0 h1:1QSUC0w5T1wS1d/1uvPtG8GLeD0p/4zhx1Q+Fxtna+k=
 github.com/digitalocean/godo v1.52.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
+github.com/digitalocean/godo v1.54.0 h1:KP0Nv87pgViR8k/7De3VrmflCL5pJqXbNnkcw0bwG10=
+github.com/digitalocean/godo v1.54.0/go.mod h1:p7dOjjtSBqCTUksqtA5Fd3uaKs9kyTq2xcz76ulEJRU=
 github.com/docker/distribution v2.7.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@@ -394,6 +413,8 @@ github.com/docker/docker v0.7.3-0.20190103212154-2b7e084dc98b/go.mod h1:eEKB0N0r
 github.com/docker/docker v0.7.3-0.20190817195342-4760db040282/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible h1:+mzU0jHyjWpYHiD0StRlsVXkCvecWS2hc55M3OlUJSk=
 github.com/docker/docker v17.12.0-ce-rc1.0.20200706150819-a40b877fbb9e+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.1+incompatible h1:u0HIBLwOJdemyBdTCkoBX34u3lb5KyBo0rQE3a5Yg+E=
+github.com/docker/docker v20.10.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-metrics v0.0.0-20181218153428-b84716841b82/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
@@ -853,6 +874,8 @@ github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4=
 github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.2 h1:aeE13tS0IiQgFjYdoL8qN3K1N2bXXtI6Vi51/y7BpMw=
 github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3 h1:ur2rms48b3Ep1dxh7aUV2FZEQ8jEVO2F6ILKx8ofkAg=
+github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 h1:23T5iq8rbUYlhpt5DB4XJkc6BU31uODLD1o1gKvZmD0=
 github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2/go.mod h1:k9Qvh+8juN+UKMCS/3jFtGICgW8O96FVaZsaxdzDkR4=
 github.com/golangci/dupl v0.0.0-20180902072040-3e9179ac440a h1:w8hkcTqaFpzKqonE9uMCefW1WDie15eSP/4MssdenaM=
@@ -903,6 +926,8 @@ github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k=
 github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
 github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M=
+github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk=
@@ -916,6 +941,8 @@ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPg
 github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
 github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs=
 github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
+github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60=
+github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
 github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
 github.com/google/pprof v0.0.0-20190723021845-34ac40c74b70/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -930,13 +957,18 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf
 github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7 h1:k+KkMRk8mGOu1xG38StS7dQ+Z6oW1i9n3dgrAVU9Q/E=
 github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
 github.com/google/pprof v0.0.0-20201007051231-1066cbb265c7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
+github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/pprof v0.0.0-20201117184057-ae444373da19 h1:iFELRewmQ9CldLrqgr0E6b6ZPfZmMvLyyz6kMsR+c4w=
 github.com/google/pprof v0.0.0-20201117184057-ae444373da19/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2 h1:LR89qFljJ48s990kEKGsk213yIJDPI4205OKOzbURK8=
+github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
 github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
 github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
 github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
+github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
 github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww=
 github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
 github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
@@ -961,6 +993,8 @@ github.com/gophercloud/gophercloud v0.12.0/go.mod h1:gmC5oQqMDOMO1t1gq5DquX/yAU8
 github.com/gophercloud/gophercloud v0.13.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM=
 github.com/gophercloud/gophercloud v0.14.0 h1:c2Byo+YMxhHlTJ3TPptjQ4dOQ1YknTHDJ/9zClDH+84=
 github.com/gophercloud/gophercloud v0.14.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM=
+github.com/gophercloud/gophercloud v0.15.0 h1:jQeAWj0s1p83+TrUXhJhEOK4oe2g6YcBcFwEyMNIjEk=
+github.com/gophercloud/gophercloud v0.15.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM=
 github.com/gopherjs/gopherjs v0.0.0-20181004151105-1babbf986f6f/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
 github.com/gopherjs/gopherjs v0.0.0-20191106031601-ce3c9ade29de h1:F7WD09S8QB4LrkEpka0dFPLSotH11HRpCsLIbIcJ7sU=
@@ -1024,6 +1058,8 @@ github.com/hashicorp/consul/api v1.5.0/go.mod h1:LqwrLNW876eYSuUOo4ZLHBcdKc038tx
 github.com/hashicorp/consul/api
v1.6.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg= github.com/hashicorp/consul/api v1.7.0 h1:tGs8Oep67r8CcA2Ycmb/8BLBcJ70St44mF2X10a/qPg= github.com/hashicorp/consul/api v1.7.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg= +github.com/hashicorp/consul/api v1.8.1 h1:BOEQaMWoGMhmQ29fC26bi0qb7/rId9JzZP2V0Xmx7m8= +github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0 h1:UOxjlb4xVNF93jak1mzzoBatyFju9nrkxpVwIp/QqxQ= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= @@ -1032,6 +1068,8 @@ github.com/hashicorp/consul/sdk v0.5.0 h1:WC4594Wp/LkEeML/OdQKEC1yqBmEYkRp6i7X5u github.com/hashicorp/consul/sdk v0.5.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.6.0 h1:FfhMEkwvQl57CildXJyGHnwGGM4HMODGyfjGwNM1Vdw= github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= +github.com/hashicorp/consul/sdk v0.7.0 h1:H6R9d008jDcHPQPAqPNuydAshJ4v5/8URdFnUvK/+sc= +github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= @@ -1099,6 +1137,8 @@ github.com/hashicorp/serf v0.9.0 h1:+Zd/16AJ9lxk9RzfTDyv/TLhZ8UerqYS0/+JGCIDaa0= github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU= github.com/hashicorp/serf v0.9.3 h1:AVF6JDQQens6nMHT9OGERBvK0f8rPrAGILnsKLr6lzM= github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.5 h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d h1:W+SIwDdl3+jXWeidYySAgzytE3piq6GumXeBjFBG67c= github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= @@ -1412,6 +1452,8 @@ github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8 github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mjibson/esc v0.2.0 h1:k96hdaR9Z+nMcnDwNrOvhdBqtjyMrbVyxLpsRCdP2mA= github.com/mjibson/esc v0.2.0/go.mod h1:9Hw9gxxfHulMF5OJKCyhYD7PzlSdhzXyaGEBRPH1OPs= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635 h1:rzf0wL0CHVc8CEsgyygG0Mn9CNCCPZqOPaz8RiiHYQk= +github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -1454,6 +1496,8 @@ github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1 github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod 
h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= github.com/ncw/swift v1.0.50 h1:E01b5bVIssNhx2KnzAjMWEXkKrb8ytTqCDWY7lqmWjA= github.com/ncw/swift v1.0.50/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= +github.com/ncw/swift v1.0.52 h1:ACF3JufDGgeKp/9mrDgQlEgS8kRYC4XKcuzj/8EJjQU= +github.com/ncw/swift v1.0.52/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/nicksnyder/go-i18n v1.10.0/go.mod h1:HrK7VCrbOvQoUAQ7Vpy7i87N7JZZZ7R2xBGjv0j365Q= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= @@ -1621,6 +1665,8 @@ github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNja github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= github.com/prometheus/client_golang v1.8.0 h1:zvJNkoCFAnYFNC24FV8nW4JdRJ3GIFcLbg65lL/JDcw= github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM= +github.com/prometheus/client_golang v1.9.0 h1:Rrch9mh17XcxvEu9D9DEpb4isxjGBtcevQjKvxPRQIU= +github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1644,6 +1690,7 @@ github.com/prometheus/common v0.12.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16 github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.15.0 h1:4fgOnadei3EZvgRwxJ7RMpG1k1pOZth5Pc13tyspaKM= github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= +github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 h1:dTUS1vaLWq+Y6XKOTnrFpoVsQKLCbCp1OLj24TDi7oM= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1679,6 +1726,8 @@ github.com/prometheus/prometheus v1.8.2-0.20201029103703-63be30dceed9/go.mod h1: github.com/prometheus/prometheus v1.8.2-0.20201119142752-3ad25a6dc3d9/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg= github.com/prometheus/prometheus v1.8.2-0.20201119181812-c8f810083d3f h1:OgXvmGvAEUIWgzBTrfXdEMCFxnXBirp32iF0TX2GvqY= github.com/prometheus/prometheus v1.8.2-0.20201119181812-c8f810083d3f/go.mod h1:1MDE/bXgu4gqd5w/otko6WQpXZX9vu8QX4KbitCmaPg= +github.com/prometheus/prometheus v1.8.2-0.20210124145330-b5dfa2414b9e h1:AecjdAG+yqtpJXxsems6dOD8GT7st5qU9uvlV93G3hw= +github.com/prometheus/prometheus v1.8.2-0.20210124145330-b5dfa2414b9e/go.mod h1:pZyryEk2SoMVjRI6XFqZLW7B9vPevv8lqwESVYjP1WA= github.com/prometheus/statsd_exporter v0.15.0 h1:UiwC1L5HkxEPeapXdm2Ye0u1vUJfTj7uwT5yydYpa1E= github.com/prometheus/statsd_exporter v0.15.0/go.mod h1:Dv8HnkoLQkeEjkIE4/2ndAA7WL1zHKK7WMqFQqu72rw= github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA= @@ -1722,6 +1771,8 @@ github.com/samuel/go-zookeeper 
v0.0.0-20190923202752-2cc03de413da h1:p3Vo3i64TCL github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e h1:CGjiMQ0wMH4wtNWrlj6kiTbkPt2F3rbYnhGX6TWLfco= github.com/samuel/go-zookeeper v0.0.0-20200724154423-2164a8ac840e/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= +github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414 h1:AJNDS0kP60X8wwWFvbLPwDuojxubj9pbfK7pjHw0vKg= +github.com/samuel/go-zookeeper v0.0.0-20201211165307-7117e9ea2414/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/santhosh-tekuri/jsonschema/v2 v2.1.0/go.mod h1:yzJzKUGV4RbWqWIBBP4wSOBqavX5saE02yirLS0OTyg= github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= @@ -1871,8 +1922,9 @@ github.com/thanos-io/thanos v0.13.1-0.20200807203500-9b578afb4763 h1:c84P3YUu8bx github.com/thanos-io/thanos v0.13.1-0.20200807203500-9b578afb4763/go.mod h1:KyW0a93tsh7v4hXAwo2CVAIRYuZT1Kkf4e04gisQjAg= github.com/thanos-io/thanos v0.13.1-0.20201019130456-f41940581d9a/go.mod h1:A3qUEEbsVkplJnxyDLwuIuvTDaJPByTH+hMdTl9ujAA= github.com/thanos-io/thanos v0.13.1-0.20201030101306-47f9a225cc52/go.mod h1:OqqX4x21cg5N5MMHd/yGQAc/V3wg8a7Do4Jk8HfaFZQ= -github.com/thanos-io/thanos v0.13.1-0.20201130180807-84afc97e7d58 h1:Q5t3TKhiFQ2J3XQv1psoMBSBk/Dx6p4JqoETXiWQaYg= -github.com/thanos-io/thanos v0.13.1-0.20201130180807-84afc97e7d58/go.mod h1:ffr9z+gefM664JBH/CEMHyHvShq2BQTejT/Ws+V+80Q= +github.com/thanos-io/thanos v0.13.1-0.20210108102609-f85e4003ba51/go.mod h1:kPvI4H0AynFiHDN95ZB28/k70ZPGCx+pBrRh6RZPimw= +github.com/thanos-io/thanos v0.13.1-0.20210204123931-82545cdd16fe h1:YMGaJuBKOK3XtCxxezHClrV2OTImnSdzpMQnXG9nqgw= +github.com/thanos-io/thanos v0.13.1-0.20210204123931-82545cdd16fe/go.mod h1:ZLDGYRNkgM+FCwYNOD+6tOV+DE2fpjzfV6iqXyOgFIw= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/gjson v1.3.2/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= @@ -1944,6 +1996,8 @@ github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099 h1:MS5M2antM8wzM github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099/go.mod h1:hz10LOsAdzC3K/iXaKoFxOKTDRgxJl+BTGX1GY+TzO4= github.com/weaveworks/common v0.0.0-20201119133501-0619918236ec h1:5JmevdpzK10Z2ua0VDToj7Kg2+/t0FzdYBjsurYRE8k= github.com/weaveworks/common v0.0.0-20201119133501-0619918236ec/go.mod h1:ykzWac1LtVfOxdCK+jD754at1Ws9dKCwFeUzkFBffPs= +github.com/weaveworks/common v0.0.0-20210112142934-23c8d7fa6120 h1:zQtcwREXYNvW116ipgc0bRDg1avD2b6QP0RGPLlPWkc= +github.com/weaveworks/common v0.0.0-20210112142934-23c8d7fa6120/go.mod h1:ykzWac1LtVfOxdCK+jD754at1Ws9dKCwFeUzkFBffPs= github.com/weaveworks/promrus v1.2.0 h1:jOLf6pe6/vss4qGHjXmGz4oDJQA+AOCqEL3FvvZGz7M= github.com/weaveworks/promrus v1.2.0/go.mod h1:SaE82+OJ91yqjrE1rsvBWVzNZKcHYFtMUyS1+Ogs/KA= github.com/willf/bitset v1.1.3/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= @@ -2001,6 +2055,8 @@ go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod 
h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= go.opentelemetry.io/collector v0.6.1 h1:sOEj3l1taqtenqOX1WlasowczphZjvU30OpTs+zDsfk= go.opentelemetry.io/collector v0.6.1/go.mod h1:lcHiwlBB9t4nz3nSwgjm1qFr+g2cEOlISIKQqwoIxws= go.opentelemetry.io/otel v0.11.0 h1:IN2tzQa9Gc4ZVKnTaMbPVcHjvzOdg5n9QfnmlqiET7E= @@ -2083,6 +2139,8 @@ golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de h1:ikNHVSjEfnvz6sxdSPCaPt golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9 h1:sYNJzB4J8toYPQTM6pAkcmBRgw9SnQKP9oXCHfgy604= +golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -2188,8 +2246,11 @@ golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOL golang.org/x/net v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b h1:iFwSg7t5GZmB/Q5TjiEAsdoLDrdJRC1RiF2WhuV29Qw= +golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181003184128-c57b0facaced/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -2203,6 +2264,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 h1:ld7aEMNHoBnnDAX15v1T6z golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58 h1:Mj83v+wSRNEar42a/MQgxk9X42TdEmrOl9i+y8WbxLo= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5 h1:Lm4OryKCca1vehdsWogr9N4t7NfZxLbJoc/H0w4K4S4= +golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2216,6 +2279,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a h1:DcqTD9SDLc+1P/r1EmRBwnVsrOwW+kk2vWf9n+1sGhs= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180816055513-1c9583448a9c/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -2313,6 +2378,7 @@ golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1 h1:sIky/MyNRSHTrdxfsiUSS4WIA golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f h1:Fqb3ao1hUmOR3GkUOg/Y+BadLwykBIzs5q8Ez2SbHyc= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200918174421-af09f7315aff/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2320,8 +2386,16 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201008064518-c1f3e3309c71/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211 h1:9UQO31fZ+0aKQOFldThf7BKPMJTiBfWycGh/u3UoO88= golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 h1:nxC68pudNYkKU6jWhgrqdreuFiOQWj1Fs7T3VrH4Pjw= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201223074533-0d417f636930 h1:vRgIt+nup/B/BwIS0g2oC0haq0iqbV3ZA+u6+0TlNCo= +golang.org/x/sys v0.0.0-20201223074533-0d417f636930/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= 
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2340,6 +2414,8 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s= golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 h1:Hir2P/De0WpUhtrKGGjvSb2YxUgyZ7EFOSLIcSSpiwE= +golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2395,6 +2471,7 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624190245-7f2218787638/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190711191110-9a621aea19f8/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190719005602-e377ae9d6386/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= @@ -2468,8 +2545,12 @@ golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c/go.mod h1:z6u4i615ZeAfBE4X golang.org/x/tools v0.0.0-20200918232735-d647fc253266 h1:k7tVuG0g1JwmD3Jh8oAl1vQ1C3jb4Hi/dUl1wWDBJpQ= golang.org/x/tools v0.0.0-20200918232735-d647fc253266/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201020161133-226fd2f889ca/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201119054027-25dc3e1ccc3c h1:EFNvrTleQM8C5s1RJh1h8mJzTEV7BJxtXTP+6qSofJY= golang.org/x/tools v0.0.0-20201119054027-25dc3e1ccc3c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201228162255-34cd474b9958 h1:8dEW6cGnUF2NIvtKDx8YsMBEw5pUrMEVUrU0jiPgmu8= +golang.org/x/tools v0.0.0-20201228162255-34cd474b9958/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2513,6 +2594,8 @@ google.golang.org/api v0.32.0 h1:Le77IccnTqEa8ryp9wIpX5W3zYm7Gf9LhOp9PHcwFts= google.golang.org/api v0.32.0/go.mod 
h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.35.0 h1:TBCmTTxUrRDA1iTctnK/fIeitxIZ+TQuaf0j29fmCGo= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= +google.golang.org/api v0.36.0 h1:l2Nfbl2GPXdWorv+dT2XfinX2jOOw4zv1VhLstx+6rE= +google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2523,6 +2606,8 @@ google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpC google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= +google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -2574,8 +2659,9 @@ google.golang.org/genproto v0.0.0-20200831141814-d751682dd103/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200914193844-75d14daec038/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200921151605-7abf4a1a14d5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20201028140639-c77dae4b0522 h1:7RoRaOmOAXwqnurgQ5g5/d0yCi9ha2UxuTZULXudK7A= -google.golang.org/genproto v0.0.0-20201028140639-c77dae4b0522/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e h1:wYR00/Ht+i/79g/gzhdehBgLIJCklKoc8Q/NebdzzpY= +google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2650,6 +2736,8 @@ gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20191120175047-4206685974f2/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= 
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= @@ -2658,6 +2746,9 @@ gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclp gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= +gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2667,15 +2758,6 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4 h1:UoveltGrhghAA7ePc+e+QYDHXrBps2PqFZiHkGR/xK8= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -k8s.io/api v0.0.0-20190813020757-36bff7324fb7/go.mod h1:3Iy+myeAORNCLgjd/Xu9ebwN7Vh59Bw0vh9jhoX+V58= -k8s.io/api v0.0.0-20191115095533-47f6de673b26 h1:6L7CEQVcduEr9eUPN3r3RliLvDrvcaniFOE5B5zRfmc= -k8s.io/api v0.0.0-20191115095533-47f6de673b26/go.mod h1:iA/8arsvelvo4IDqIhX4IbjTEKBGgvsf2OraTuRtLFU= -k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= -k8s.io/api v0.18.5/go.mod h1:tN+e/2nbdGKOAH55NMV8oGrMG+3uRlA9GaRfvnCCSNk= -k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI= -k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY= -k8s.io/api v0.19.2 h1:q+/krnHWKsL7OBZg/rxnycsl9569Pud76UJ77MvKXms= -k8s.io/api v0.19.2/go.mod h1:IQpK0zFQ1xc5iNIQPqzgoOwuFugaYHK4iCknlAQP9nI= k8s.io/api v0.19.4 h1:I+1I4cgJYuCDgiLNjKx7SLmIbwgj9w7N7Zr5vSIdwpo= k8s.io/api v0.19.4/go.mod h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk= k8s.io/apimachinery v0.0.0-20190809020650-423f5d784010/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= @@ -2689,6 +2771,8 @@ k8s.io/apimachinery v0.19.2 h1:5Gy9vQpAGTKHPVOh5c4plE274X8D/6cuEiTO2zve7tc= k8s.io/apimachinery v0.19.2/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0= k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/apimachinery v0.20.1 h1:LAhz8pKbgR8tUwn7boK+b2HZdt7MiTu2mkYtFMUjTRQ= +k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/client-go v0.19.2 h1:gMJuU3xJZs86L1oQ99R4EViAADUPMHHtS9jFshasHSc= k8s.io/client-go v0.19.2/go.mod h1:S5wPhCqyDNAlzM9CnEdgTGV4OqhsW3jGO1UM1epwfJA= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= @@ -2713,6 +2797,8 @@ k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKf k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod 
h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c= +k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/utils v0.0.0-20190809000727-6c36bc71fc4a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6 h1:p0Ai3qVtkbCG/Af26dBmU0E1W58NID3hSSh7cMyylpM= k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= @@ -2743,6 +2829,8 @@ sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8= +sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= diff --git a/modules/compactor/compactor.go b/modules/compactor/compactor.go index 509dc46f6f1..65a1544c806 100644 --- a/modules/compactor/compactor.go +++ b/modules/compactor/compactor.go @@ -7,7 +7,7 @@ import ( "time" "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" "github.com/go-kit/kit/log/level" "github.com/grafana/tempo/modules/overrides" @@ -83,7 +83,7 @@ func (c *Compactor) starting(ctx context.Context) error { ctx := context.Background() - level.Info(util.Logger).Log("msg", "waiting to be active in the ring") + level.Info(log.Logger).Log("msg", "waiting to be active in the ring") err = c.waitRingActive(ctx) if err != nil { return err @@ -95,9 +95,9 @@ func (c *Compactor) starting(ctx context.Context) error { func (c *Compactor) running(ctx context.Context) error { go func() { - level.Info(util.Logger).Log("msg", "waiting for compaction ring to settle", "waitDuration", waitOnStartup) + level.Info(log.Logger).Log("msg", "waiting for compaction ring to settle", "waitDuration", waitOnStartup) time.Sleep(waitOnStartup) - level.Info(util.Logger).Log("msg", "enabling compaction") + level.Info(log.Logger).Log("msg", "enabling compaction") c.store.EnableCompaction(&c.cfg.Compactor, c, c) }() @@ -130,24 +130,24 @@ func (c *Compactor) Owns(hash string) bool { return true } - level.Debug(util.Logger).Log("msg", "checking hash", "hash", hash) + level.Debug(log.Logger).Log("msg", "checking hash", "hash", hash) hasher := fnv.New32a() _, _ = hasher.Write([]byte(hash)) hash32 := hasher.Sum32() - rs, err := c.Ring.Get(hash32, ring.Read, []ring.IngesterDesc{}) + rs, err := c.Ring.Get(hash32, ring.Read, []ring.InstanceDesc{}, nil, nil) if err != nil { - level.Error(util.Logger).Log("msg", "failed to get ring", "err", err) + level.Error(log.Logger).Log("msg", "failed to get ring", "err", err) return false } if len(rs.Ingesters) != 1 { - level.Error(util.Logger).Log("msg", "unexpected number of compactors in the shard (expected 1, got %d)", len(rs.Ingesters)) + level.Error(log.Logger).Log("msg", "unexpected number 
of compactors in the shard", "expected", 1, "got", len(rs.Ingesters)) return false } - level.Debug(util.Logger).Log("msg", "checking addresses", "owning_addr", rs.Ingesters[0].Addr, "this_addr", c.ringLifecycler.Addr) + level.Debug(log.Logger).Log("msg", "checking addresses", "owning_addr", rs.Ingesters[0].Addr, "this_addr", c.ringLifecycler.Addr) return rs.Ingesters[0].Addr == c.ringLifecycler.Addr } @@ -156,7 +156,7 @@ func (c *Compactor) Owns(hash string) bool { func (c *Compactor) Combine(objA []byte, objB []byte) []byte { combinedTrace, err := tempo_util.CombineTraces(objA, objB) if err != nil { - level.Error(util.Logger).Log("msg", "error combining trace protos", "err", err.Error()) + level.Error(log.Logger).Log("msg", "error combining trace protos", "err", err.Error()) } return combinedTrace } diff --git a/modules/distributor/distributor.go b/modules/distributor/distributor.go index 4d996983b32..3a79fd7d93f 100644 --- a/modules/distributor/distributor.go +++ b/modules/distributor/distributor.go @@ -8,8 +8,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring" ring_client "github.com/cortexproject/cortex/pkg/ring/client" - cortex_util "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/limiter" + cortex_util "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" "github.com/gogo/status" opentelemetry_proto_trace_v1 "github.com/open-telemetry/opentelemetry-proto/gen/go/trace/v1" @@ -248,7 +248,7 @@ func (d *Distributor) sendToIngestersViaBytes(ctx context.Context, userID string rawRequests[i] = b } - err := ring.DoBatch(ctx, d.ingestersRing, keys, func(ingester ring.IngesterDesc, indexes []int) error { + err := ring.DoBatch(ctx, ring.Write, d.ingestersRing, keys, func(ingester ring.InstanceDesc, indexes []int) error { localCtx, cancel := context.WithTimeout(context.Background(), d.clientCfg.RemoteTimeout) defer cancel() diff --git a/modules/distributor/distributor_test.go b/modules/distributor/distributor_test.go index 43673234fca..8c9b1e587c2 100644 --- a/modules/distributor/distributor_test.go +++ b/modules/distributor/distributor_test.go @@ -348,7 +348,7 @@ func prepare(t *testing.T, limits *overrides.Limits, kvStore kv.Client) *Distrib replicationFactor: 3, } for addr := range ingesters { - ingestersRing.ingesters = append(ingestersRing.ingesters, ring.IngesterDesc{ + ingestersRing.ingesters = append(ingestersRing.ingesters, ring.InstanceDesc{ Addr: addr, }) } @@ -392,13 +392,13 @@ func (i *mockIngester) Close() error { // ingesters.
type mockRing struct { prometheus.Counter - ingesters []ring.IngesterDesc + ingesters []ring.InstanceDesc replicationFactor uint32 } var _ ring.ReadRing = (*mockRing)(nil) -func (r mockRing) Get(key uint32, op ring.Operation, buf []ring.IngesterDesc) (ring.ReplicationSet, error) { +func (r mockRing) Get(key uint32, op ring.Operation, buf []ring.InstanceDesc, _, _ []string) (ring.ReplicationSet, error) { result := ring.ReplicationSet{ MaxErrors: 1, Ingesters: buf[:0], @@ -433,7 +433,7 @@ func (r mockRing) ShuffleShardWithLookback(string, int, time.Duration, time.Time return r } -func (r mockRing) IngesterCount() int { +func (r mockRing) InstancesCount() int { return len(r.ingesters) } diff --git a/modules/distributor/receiver/shim.go b/modules/distributor/receiver/shim.go index 09e669dd4cb..48ccaf83683 100644 --- a/modules/distributor/receiver/shim.go +++ b/modules/distributor/receiver/shim.go @@ -7,7 +7,7 @@ import ( "time" "contrib.go.opencensus.io/exporter/prometheus" - "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" "github.com/go-kit/kit/log/level" zaplogfmt "github.com/jsternberg/zap-logfmt" @@ -53,7 +53,7 @@ func New(receiverCfg map[string]interface{}, pusher tempopb.PusherServer, authEn shim := &receiversShim{ authEnabled: authEnabled, pusher: pusher, - logger: tempo_util.NewRateLimitedLogger(logsPerSecond, level.Error(util.Logger)), + logger: tempo_util.NewRateLimitedLogger(logsPerSecond, level.Error(log.Logger)), } v := viper.New() @@ -183,7 +183,7 @@ func (r *receiversShim) ConsumeTraces(ctx context.Context, td pdata.Traces) erro // implements component.Host func (r *receiversShim) ReportFatalError(err error) { - level.Error(util.Logger).Log("msg", "fatal error reported", "err", err) + level.Error(log.Logger).Log("msg", "fatal error reported", "err", err) } // implements component.Host diff --git a/modules/ingester/client/client.go b/modules/ingester/client/client.go index d977a8a4039..93141987f1b 100644 --- a/modules/ingester/client/client.go +++ b/modules/ingester/client/client.go @@ -48,7 +48,13 @@ func New(addr string, cfg Config) (*Client, error) { grpc.UseCompressor("gzip"), ), } - opts = append(opts, cfg.GRPCClientConfig.DialOption(instrumentation())...) + + instrumentationOpts, err := cfg.GRPCClientConfig.DialOption(instrumentation()) + if err != nil { + return nil, err + } + + opts = append(opts, instrumentationOpts...) conn, err := grpc.Dial(addr, opts...) 
if err != nil { return nil, err diff --git a/modules/ingester/flush.go b/modules/ingester/flush.go index 6ba1681b22f..fb8ef58c358 100644 --- a/modules/ingester/flush.go +++ b/modules/ingester/flush.go @@ -6,7 +6,7 @@ import ( "net/http" "time" - "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -47,7 +47,7 @@ func (i *Ingester) Flush() { for _, instance := range instances { err := instance.CutCompleteTraces(0, true) if err != nil { - level.Error(util.WithUserID(instance.instanceID, util.Logger)).Log("msg", "failed to cut complete traces on shutdown", "err", err) + level.Error(log.WithUserID(instance.instanceID, log.Logger)).Log("msg", "failed to cut complete traces on shutdown", "err", err) } } } @@ -85,21 +85,21 @@ func (i *Ingester) sweepInstance(instance *instance, immediate bool) { // cut traces internally err := instance.CutCompleteTraces(i.cfg.MaxTraceIdle, immediate) if err != nil { - level.Error(util.WithUserID(instance.instanceID, util.Logger)).Log("msg", "failed to cut traces", "err", err) + level.Error(log.WithUserID(instance.instanceID, log.Logger)).Log("msg", "failed to cut traces", "err", err) return } // see if it's ready to cut a block? err = instance.CutBlockIfReady(i.cfg.MaxBlockDuration, i.cfg.MaxBlockBytes, immediate) if err != nil { - level.Error(util.WithUserID(instance.instanceID, util.Logger)).Log("msg", "failed to cut block", "err", err) + level.Error(log.WithUserID(instance.instanceID, log.Logger)).Log("msg", "failed to cut block", "err", err) return } // dump any blocks that have been flushed for awhile err = instance.ClearFlushedBlocks(i.cfg.CompleteBlockTimeout) if err != nil { - level.Error(util.WithUserID(instance.instanceID, util.Logger)).Log("msg", "failed to complete block", "err", err) + level.Error(log.WithUserID(instance.instanceID, log.Logger)).Log("msg", "failed to complete block", "err", err) } // see if any complete blocks are ready to be flushed @@ -113,7 +113,7 @@ func (i *Ingester) sweepInstance(instance *instance, immediate bool) { func (i *Ingester) flushLoop(j int) { defer func() { - level.Debug(util.Logger).Log("msg", "Ingester.flushLoop() exited") + level.Debug(log.Logger).Log("msg", "Ingester.flushLoop() exited") i.flushQueuesDone.Done() }() @@ -124,11 +124,11 @@ func (i *Ingester) flushLoop(j int) { } op := o.(*flushOp) - level.Debug(util.Logger).Log("msg", "flushing block", "userid", op.userID, "fp") + level.Debug(log.Logger).Log("msg", "flushing block", "userid", op.userID) err := i.flushUserTraces(op.userID) if err != nil { - level.Error(util.WithUserID(op.userID, util.Logger)).Log("msg", "failed to flush user", "err", err) + level.Error(log.WithUserID(op.userID, log.Logger)).Log("msg", "failed to flush user", "err", err) // re-queue failed flush op.from += int64(flushBackoff) diff --git a/modules/ingester/ingester.go b/modules/ingester/ingester.go index d4e12f00d53..93360f96011 100644 --- a/modules/ingester/ingester.go +++ b/modules/ingester/ingester.go @@ -14,7 +14,7 @@ import ( "github.com/weaveworks/common/user" "github.com/cortexproject/cortex/pkg/ring" - "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" "github.com/grafana/tempo/modules/overrides" @@ -275,11 +275,11 @@ func (i *Ingester) replayWal() error { return nil } -
level.Info(util.Logger).Log("msg", "beginning wal replay", "numBlocks", len(blocks)) + level.Info(log.Logger).Log("msg", "beginning wal replay", "numBlocks", len(blocks)) for _, b := range blocks { tenantID := b.TenantID() - level.Info(util.Logger).Log("msg", "beginning block replay", "tenantID", tenantID) + level.Info(log.Logger).Log("msg", "beginning block replay", "tenantID", tenantID) instance, err := i.getOrCreateInstance(tenantID) if err != nil { @@ -289,7 +289,7 @@ func (i *Ingester) replayWal() error { err = i.replayBlock(b, instance) if err != nil { // there was an error, log and keep on keeping on - level.Error(util.Logger).Log("msg", "error replaying block. removing", "error", err) + level.Error(log.Logger).Log("msg", "error replaying block. removing", "error", err) } err = b.Clear() if err != nil { diff --git a/modules/ingester/instance.go b/modules/ingester/instance.go index 85339ae0942..2a271e631c4 100644 --- a/modules/ingester/instance.go +++ b/modules/ingester/instance.go @@ -8,7 +8,7 @@ import ( "sync" "time" - cortex_util "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/gogo/protobuf/proto" "github.com/gogo/status" @@ -163,7 +163,7 @@ func (i *instance) CutBlockIfReady(maxBlockLifetime time.Duration, maxBlockBytes _ = i.completingBlock.Clear() metricFailedFlushes.Inc() i.completingBlock = nil - level.Error(cortex_util.Logger).Log("msg", "unable to complete block. THIS BLOCK WAS LOST", "tenantID", i.instanceID, "err", err) + level.Error(log.Logger).Log("msg", "unable to complete block. THIS BLOCK WAS LOST", "tenantID", i.instanceID, "err", err) return } i.completingBlock = nil @@ -340,7 +340,7 @@ func (i *instance) writeTraceToHeadBlock(id common.ID, b []byte) error { func (i *instance) Combine(objA []byte, objB []byte) []byte { combinedTrace, err := util.CombineTraces(objA, objB) if err != nil { - level.Error(cortex_util.Logger).Log("msg", "error combining trace protos", "err", err.Error()) + level.Error(log.Logger).Log("msg", "error combining trace protos", "err", err.Error()) } return combinedTrace } diff --git a/modules/querier/config.go b/modules/querier/config.go index 38432660a6b..6b527a9c19b 100644 --- a/modules/querier/config.go +++ b/modules/querier/config.go @@ -25,13 +25,10 @@ func (cfg *Config) RegisterFlagsAndApplyDefaults(prefix string, f *flag.FlagSet) MatchMaxConcurrency: true, MaxConcurrentRequests: cfg.MaxConcurrentQueries, Parallelism: 2, - GRPCClientConfig: grpcclient.ConfigWithTLS{ - GRPC: grpcclient.Config{ - MaxRecvMsgSize: 100 << 20, - MaxSendMsgSize: 16 << 20, - UseGzipCompression: false, - GRPCCompression: "gzip", - }, + GRPCClientConfig: grpcclient.Config{ + MaxRecvMsgSize: 100 << 20, + MaxSendMsgSize: 16 << 20, + GRPCCompression: "gzip", }, } diff --git a/modules/querier/querier.go b/modules/querier/querier.go index 90c89790ead..db5ef519cc8 100644 --- a/modules/querier/querier.go +++ b/modules/querier/querier.go @@ -16,7 +16,7 @@ import ( cortex_worker "github.com/cortexproject/cortex/pkg/querier/worker" "github.com/cortexproject/cortex/pkg/ring" ring_client "github.com/cortexproject/cortex/pkg/ring/client" - "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" httpgrpc_server "github.com/weaveworks/common/httpgrpc/server" @@ -69,7 +69,7 @@ func New(cfg Config, clientCfg ingester_client.Config, ring ring.ReadRing, store ring_client.NewRingServiceDiscovery(ring), 
factory, metricIngesterClients, - util.Logger), + log.Logger), store: store, limits: limits, } @@ -83,7 +83,7 @@ func (q *Querier) CreateAndRegisterWorker(tracesHandler http.Handler) error { worker, err := cortex_worker.NewQuerierWorker( q.cfg.Worker, httpgrpc_server.NewServer(tracesHandler), - util.Logger, + log.Logger, nil, ) if err != nil { @@ -153,8 +153,8 @@ func (q *Querier) FindTraceByID(ctx context.Context, req *tempopb.TraceByIDReque key := tempo_util.TokenFor(userID, req.TraceID) const maxExpectedReplicationSet = 3 // 3. b/c frigg it - var descs [maxExpectedReplicationSet]ring.IngesterDesc - replicationSet, err := q.ring.Get(key, ring.Read, descs[:0]) + var descs [maxExpectedReplicationSet]ring.InstanceDesc + replicationSet, err := q.ring.Get(key, ring.Read, descs[:0], nil, nil) if err != nil { return nil, errors.Wrap(err, "error finding ingesters in Querier.FindTraceByID") } @@ -211,7 +211,7 @@ func (q *Querier) FindTraceByID(ctx context.Context, req *tempopb.TraceByIDReque // forGivenIngesters runs f, in parallel, for given ingesters func (q *Querier) forGivenIngesters(ctx context.Context, replicationSet ring.ReplicationSet, f func(client tempopb.QuerierClient) (*tempopb.TraceByIDResponse, error)) ([]responseFromIngesters, error) { - results, err := replicationSet.Do(ctx, q.cfg.ExtraQueryDelay, func(ctx context.Context, ingester *ring.IngesterDesc) (interface{}, error) { + results, err := replicationSet.Do(ctx, q.cfg.ExtraQueryDelay, func(ctx context.Context, ingester *ring.InstanceDesc) (interface{}, error) { client, err := q.pool.GetClientFor(ingester.Addr) if err != nil { return nil, err diff --git a/pkg/ring/ring.go b/pkg/ring/ring.go index d1baa9448ba..d60aeeb15e2 100644 --- a/pkg/ring/ring.go +++ b/pkg/ring/ring.go @@ -45,14 +45,14 @@ type EventuallyConsistentStrategy struct { // - Filters out dead ingesters so the one doesn't even try to write to them. // - Checks there is enough ingesters for an operation to succeed. // The ingesters argument may be overwritten. -func (s *EventuallyConsistentStrategy) Filter(ingesters []ring.IngesterDesc, op ring.Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) ([]ring.IngesterDesc, int, error) { +func (s *EventuallyConsistentStrategy) Filter(ingesters []ring.InstanceDesc, op ring.Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) ([]ring.InstanceDesc, int, error) { minSuccess := 1 // Skip those that have not heartbeated in a while. NB these are still // included in the calculation of minSuccess, so if too many failed ingesters // will cause the whole write to fail. for i := 0; i < len(ingesters); { - if ingesters[i].IsHealthy(op, heartbeatTimeout) { + if ingesters[i].IsHealthy(op, heartbeatTimeout, time.Now()) { i++ } else { ingesters = append(ingesters[:i], ingesters[i+1:]...) @@ -70,7 +70,7 @@ func (s *EventuallyConsistentStrategy) Filter(ingesters []ring.IngesterDesc, op return ingesters, len(ingesters) - minSuccess, nil } -func (s *EventuallyConsistentStrategy) ShouldExtendReplicaSet(ingester ring.IngesterDesc, op ring.Operation) bool { +func (s *EventuallyConsistentStrategy) ShouldExtendReplicaSet(ingester ring.InstanceDesc, op ring.Operation) bool { // We do not want to Write to Ingesters that are not ACTIVE, but we do want // to write the extra replica somewhere. So we increase the size of the set // of replicas for the key. 
This means we have to also increase the diff --git a/pkg/util/log_test.go b/pkg/util/log_test.go index 8fb456d4983..ce0966e2d25 100644 --- a/pkg/util/log_test.go +++ b/pkg/util/log_test.go @@ -3,13 +3,13 @@ package util import ( "testing" - "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/stretchr/testify/assert" ) func TestRateLimitedLogger(t *testing.T) { - logger := NewRateLimitedLogger(10, level.Error(util.Logger)) + logger := NewRateLimitedLogger(10, level.Error(log.Logger)) assert.NotNil(t, logger) logger.Log("test") diff --git a/pkg/util/trace.go b/pkg/util/trace.go index b54f3fe8d12..85fcadfff86 100644 --- a/pkg/util/trace.go +++ b/pkg/util/trace.go @@ -7,7 +7,7 @@ import ( "github.com/pkg/errors" - "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" "github.com/gogo/protobuf/proto" "github.com/grafana/tempo/pkg/tempopb" @@ -33,7 +33,7 @@ func CombineTraces(objA []byte, objB []byte) ([]byte, error) { return objA, errors.Wrap(errB, "error unsmarshaling objB") } else if errA != nil && errB != nil { // if both failed let's send back an empty trace - level.Error(util.Logger).Log("msg", "both A and B failed to unmarshal. returning an empty trace") + level.Error(log.Logger).Log("msg", "both A and B failed to unmarshal. returning an empty trace") bytes, _ := proto.Marshal(&tempopb.Trace{}) return bytes, errors.Wrap(errA, "both A and B failed to unmarshal. returning an empty trace") } diff --git a/tempodb/backend/s3/s3.go b/tempodb/backend/s3/s3.go index 875067ae86e..14d814ae437 100644 --- a/tempodb/backend/s3/s3.go +++ b/tempodb/backend/s3/s3.go @@ -10,7 +10,7 @@ import ( "net/http" "strings" - log_util "github.com/cortexproject/cortex/pkg/util" + log_util "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/google/uuid" diff --git a/tempodb/tempodb.go b/tempodb/tempodb.go index b6b9788e16a..7d2822dac6b 100644 --- a/tempodb/tempodb.go +++ b/tempodb/tempodb.go @@ -15,7 +15,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/cortexproject/cortex/pkg/util" + log_util "github.com/cortexproject/cortex/pkg/util/log" "github.com/opentracing/opentracing-go" ot_log "github.com/opentracing/opentracing-go/log" @@ -210,7 +210,7 @@ func (rw *readerWriter) WAL() *wal.WAL { func (rw *readerWriter) Find(ctx context.Context, tenantID string, id common.ID, blockStart string, blockEnd string) ([][]byte, error) { // tracing instrumentation - logger := util.WithContext(ctx, util.Logger) + logger := log_util.WithContext(ctx, log_util.Logger) span, ctx := opentracing.StartSpanFromContext(ctx, "store.Find") defer span.Finish() diff --git a/vendor/cloud.google.com/go/.gitignore b/vendor/cloud.google.com/go/.gitignore index ee9694b8780..cc7e53b46c0 100644 --- a/vendor/cloud.google.com/go/.gitignore +++ b/vendor/cloud.google.com/go/.gitignore @@ -2,6 +2,7 @@ .idea .vscode *.swp +.history # Test files *.test diff --git a/vendor/cloud.google.com/go/CHANGES.md b/vendor/cloud.google.com/go/CHANGES.md index b60914f11b4..4c762e636bd 100644 --- a/vendor/cloud.google.com/go/CHANGES.md +++ b/vendor/cloud.google.com/go/CHANGES.md @@ -1,5 +1,98 @@ # Changes + +## [0.72.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.71.0...v0.72.0) (2020-11-10) + + +### Features + +* **all:** auto-regenerate gapics , 
refs [#3177](https://www.github.com/googleapis/google-cloud-go/issues/3177) [#3164](https://www.github.com/googleapis/google-cloud-go/issues/3164) [#3149](https://www.github.com/googleapis/google-cloud-go/issues/3149) [#3142](https://www.github.com/googleapis/google-cloud-go/issues/3142) [#3136](https://www.github.com/googleapis/google-cloud-go/issues/3136) [#3130](https://www.github.com/googleapis/google-cloud-go/issues/3130) [#3121](https://www.github.com/googleapis/google-cloud-go/issues/3121) [#3119](https://www.github.com/googleapis/google-cloud-go/issues/3119) + + +### Bug Fixes + +* **all:** Update hand-written clients to not use WithEndpoint override ([#3111](https://www.github.com/googleapis/google-cloud-go/issues/3111)) ([f0cfd05](https://www.github.com/googleapis/google-cloud-go/commit/f0cfd0532f5204ff16f7bae406efa72603d16f44)) +* **internal/godocfx:** rename README files to pkg-readme ([#3185](https://www.github.com/googleapis/google-cloud-go/issues/3185)) ([d3a8571](https://www.github.com/googleapis/google-cloud-go/commit/d3a85719be411b692aede3331abb29b5a7b3da9a)) + + +## [0.71.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.70.0...v0.71.0) (2020-10-30) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3115](https://www.github.com/googleapis/google-cloud-go/issues/3115) [#3106](https://www.github.com/googleapis/google-cloud-go/issues/3106) [#3102](https://www.github.com/googleapis/google-cloud-go/issues/3102) [#3083](https://www.github.com/googleapis/google-cloud-go/issues/3083) [#3073](https://www.github.com/googleapis/google-cloud-go/issues/3073) [#3057](https://www.github.com/googleapis/google-cloud-go/issues/3057) [#3044](https://www.github.com/googleapis/google-cloud-go/issues/3044) +* **billing/budgets:** start generating apiv1 ([#3099](https://www.github.com/googleapis/google-cloud-go/issues/3099)) ([e760c85](https://www.github.com/googleapis/google-cloud-go/commit/e760c859de88a6e79b6dffc653dbf75f1630d8e3)) +* **internal:** auto-run godocfx on new mods ([#3069](https://www.github.com/googleapis/google-cloud-go/issues/3069)) ([49f497e](https://www.github.com/googleapis/google-cloud-go/commit/49f497eab80ce34dfb4ca41f033a5c0429ff5e42)) +* **pubsublite:** Added Pub/Sub Lite clients and routing headers ([#3105](https://www.github.com/googleapis/google-cloud-go/issues/3105)) ([98668fa](https://www.github.com/googleapis/google-cloud-go/commit/98668fa5457d26ed34debee708614f027020e5bc)) +* **pubsublite:** Message type and message routers ([#3077](https://www.github.com/googleapis/google-cloud-go/issues/3077)) ([179fc55](https://www.github.com/googleapis/google-cloud-go/commit/179fc550b545a5344358a243da7007ffaa7b5171)) +* **pubsublite:** Pub/Sub Lite admin client ([#3036](https://www.github.com/googleapis/google-cloud-go/issues/3036)) ([749473e](https://www.github.com/googleapis/google-cloud-go/commit/749473ead30bf1872634821d3238d1299b99acc6)) +* **pubsublite:** Publish settings and errors ([#3075](https://www.github.com/googleapis/google-cloud-go/issues/3075)) ([9eb9fcb](https://www.github.com/googleapis/google-cloud-go/commit/9eb9fcb79f17ad7c08c77c455ba3e8d89e3bdbf2)) +* **pubsublite:** Retryable stream wrapper ([#3068](https://www.github.com/googleapis/google-cloud-go/issues/3068)) ([97cfd45](https://www.github.com/googleapis/google-cloud-go/commit/97cfd4587f2f51996bd685ff486308b70eb51900)) + + +### Bug Fixes + +* **internal/kokoro:** remove unnecessary cd ([#3071](https://www.github.com/googleapis/google-cloud-go/issues/3071)) 
([c1a4c3e](https://www.github.com/googleapis/google-cloud-go/commit/c1a4c3eaffcdc3cffe0e223fcfa1f60879cd23bb)) +* **pubsublite:** Disable integration tests for project id ([#3087](https://www.github.com/googleapis/google-cloud-go/issues/3087)) ([a0982f7](https://www.github.com/googleapis/google-cloud-go/commit/a0982f79d6461feabdf31363f29fed7dc5677fe7)) + +## [0.70.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.70.0) (2020-10-19) + + +### Features + +* **all:** auto-regenerate gapics , refs [#3047](https://www.github.com/googleapis/google-cloud-go/issues/3047) [#3035](https://www.github.com/googleapis/google-cloud-go/issues/3035) [#3025](https://www.github.com/googleapis/google-cloud-go/issues/3025) +* **managedidentities:** start generating apiv1 ([#3032](https://www.github.com/googleapis/google-cloud-go/issues/3032)) ([10ccca2](https://www.github.com/googleapis/google-cloud-go/commit/10ccca238074d24fea580a4cd8e64478818b0b44)) +* **pubsublite:** Types for resource paths and topic/subscription configs ([#3026](https://www.github.com/googleapis/google-cloud-go/issues/3026)) ([6f7fa86](https://www.github.com/googleapis/google-cloud-go/commit/6f7fa86ed906258f98d996aab40184f3a46f9714)) + +## [0.69.1](https://www.github.com/googleapis/google-cloud-go/compare/v0.69.0...v0.69.1) (2020-10-14) + +This is an empty release that was created solely to aid in pubsublite's module +carve out. See: https://github.com/golang/go/wiki/Modules#is-it-possible-to-add-a-module-to-a-multi-module-repository. + +## [0.69.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.68.0...v0.69.0) (2020-10-14) + + +### Features + +* **accessapproval:** start generating apiv1 ([#3002](https://www.github.com/googleapis/google-cloud-go/issues/3002)) ([709d6e7](https://www.github.com/googleapis/google-cloud-go/commit/709d6e76393e6ac00ff488efd83bfe873173b045)) +* **all:** auto-regenerate gapics , refs [#3010](https://www.github.com/googleapis/google-cloud-go/issues/3010) [#3005](https://www.github.com/googleapis/google-cloud-go/issues/3005) [#2993](https://www.github.com/googleapis/google-cloud-go/issues/2993) [#2989](https://www.github.com/googleapis/google-cloud-go/issues/2989) [#2981](https://www.github.com/googleapis/google-cloud-go/issues/2981) [#2976](https://www.github.com/googleapis/google-cloud-go/issues/2976) [#2968](https://www.github.com/googleapis/google-cloud-go/issues/2968) [#2958](https://www.github.com/googleapis/google-cloud-go/issues/2958) +* **cmd/go-cloud-debug-agent:** mark as deprecated ([#2964](https://www.github.com/googleapis/google-cloud-go/issues/2964)) ([276ec88](https://www.github.com/googleapis/google-cloud-go/commit/276ec88b05852c33a3ba437e18d072f7ffd8fd33)) +* **godocfx:** add nesting to TOC ([#2972](https://www.github.com/googleapis/google-cloud-go/issues/2972)) ([3a49b2d](https://www.github.com/googleapis/google-cloud-go/commit/3a49b2d142a353f98429235c3f380431430b4dbf)) +* **internal/godocfx:** HTML-ify package summary ([#2986](https://www.github.com/googleapis/google-cloud-go/issues/2986)) ([9e64b01](https://www.github.com/googleapis/google-cloud-go/commit/9e64b018255bd8d9b31d60e8f396966251de946b)) +* **internal/kokoro:** make publish_docs VERSION optional ([#2979](https://www.github.com/googleapis/google-cloud-go/issues/2979)) ([76e35f6](https://www.github.com/googleapis/google-cloud-go/commit/76e35f689cb60bd5db8e14b8c8d367c5902bcb0e)) +* **websecurityscanner:** start generating apiv1 ([#3006](https://www.github.com/googleapis/google-cloud-go/issues/3006)) 
([1d92e20](https://www.github.com/googleapis/google-cloud-go/commit/1d92e2062a13f62d7a96be53a7354c0cacca6a85)) + + +### Bug Fixes + +* **godocfx:** make extra files optional, filter out third_party ([#2985](https://www.github.com/googleapis/google-cloud-go/issues/2985)) ([f268921](https://www.github.com/googleapis/google-cloud-go/commit/f2689214a24b2e325d3e8f54441bb11fbef925f0)) + +## [0.68.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.67.0...v0.68.0) (2020-10-02) + + +### Features + +* **all:** auto-regenerate gapics , refs [#2952](https://www.github.com/googleapis/google-cloud-go/issues/2952) [#2944](https://www.github.com/googleapis/google-cloud-go/issues/2944) [#2935](https://www.github.com/googleapis/google-cloud-go/issues/2935) + +## [0.67.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.66.0...v0.67.0) (2020-09-29) + + +### Features + +* **all:** auto-regenerate gapics , refs [#2933](https://www.github.com/googleapis/google-cloud-go/issues/2933) [#2919](https://www.github.com/googleapis/google-cloud-go/issues/2919) [#2913](https://www.github.com/googleapis/google-cloud-go/issues/2913) [#2910](https://www.github.com/googleapis/google-cloud-go/issues/2910) [#2899](https://www.github.com/googleapis/google-cloud-go/issues/2899) [#2897](https://www.github.com/googleapis/google-cloud-go/issues/2897) [#2886](https://www.github.com/googleapis/google-cloud-go/issues/2886) [#2877](https://www.github.com/googleapis/google-cloud-go/issues/2877) [#2869](https://www.github.com/googleapis/google-cloud-go/issues/2869) [#2864](https://www.github.com/googleapis/google-cloud-go/issues/2864) +* **assuredworkloads:** start generating apiv1beta1 ([#2866](https://www.github.com/googleapis/google-cloud-go/issues/2866)) ([7598c4d](https://www.github.com/googleapis/google-cloud-go/commit/7598c4dd2462e8270a2c7b1f496af58ca81ff568)) +* **dialogflow/cx:** start generating apiv3beta1 ([#2875](https://www.github.com/googleapis/google-cloud-go/issues/2875)) ([37ca93a](https://www.github.com/googleapis/google-cloud-go/commit/37ca93ad69eda363d956f0174d444ed5914f5a72)) +* **docfx:** add support for examples ([#2884](https://www.github.com/googleapis/google-cloud-go/issues/2884)) ([0cc0de3](https://www.github.com/googleapis/google-cloud-go/commit/0cc0de300d58be6d3b7eeb2f1baebfa6df076830)) +* **godocfx:** include README in output ([#2927](https://www.github.com/googleapis/google-cloud-go/issues/2927)) ([f084690](https://www.github.com/googleapis/google-cloud-go/commit/f084690a2ea08ce73bafaaced95ad271fd01e11e)) +* **talent:** start generating apiv4 ([#2871](https://www.github.com/googleapis/google-cloud-go/issues/2871)) ([5c98071](https://www.github.com/googleapis/google-cloud-go/commit/5c98071b03822c58862d1fa5442ff36d627f1a61)) + + +### Bug Fixes + +* **godocfx:** filter out other modules, sort pkgs ([#2894](https://www.github.com/googleapis/google-cloud-go/issues/2894)) ([868db45](https://www.github.com/googleapis/google-cloud-go/commit/868db45e2e6f4e9ad48432be86c849f335e1083d)) +* **godocfx:** shorten function names ([#2880](https://www.github.com/googleapis/google-cloud-go/issues/2880)) ([48a0217](https://www.github.com/googleapis/google-cloud-go/commit/48a0217930750c1f4327f2622b0f2a3ec8afc0b7)) +* **translate:** properly name examples ([#2892](https://www.github.com/googleapis/google-cloud-go/issues/2892)) ([c19e141](https://www.github.com/googleapis/google-cloud-go/commit/c19e1415e6fa76b7ea66a7fc67ad3ba22670a2ba)), refs 
[#2883](https://www.github.com/googleapis/google-cloud-go/issues/2883) + ## [0.66.0](https://www.github.com/googleapis/google-cloud-go/compare/v0.65.0...v0.66.0) (2020-09-15) diff --git a/vendor/cloud.google.com/go/CONTRIBUTING.md b/vendor/cloud.google.com/go/CONTRIBUTING.md index 00adbb0bbf9..e1398722c3e 100644 --- a/vendor/cloud.google.com/go/CONTRIBUTING.md +++ b/vendor/cloud.google.com/go/CONTRIBUTING.md @@ -103,7 +103,8 @@ Next, ensure the following APIs are enabled in the general project: - Google Compute Engine Instance Group Updater API - Google Compute Engine Instance Groups API - Kubernetes Engine API -- Stackdriver Error Reporting API +- Cloud Error Reporting API +- Pub/Sub Lite API Next, create a Datastore database in the general project, and a Firestore database in the Firestore project. @@ -150,7 +151,7 @@ $ gcloud auth login $ gcloud datastore indexes create datastore/testdata/index.yaml # Creates a Google Cloud storage bucket with the same name as your test project, -# and with the Stackdriver Logging service account as owner, for the sink +# and with the Cloud Logging service account as owner, for the sink # integration tests in logging. $ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID $ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID diff --git a/vendor/cloud.google.com/go/RELEASING.md b/vendor/cloud.google.com/go/RELEASING.md index c8c7f933527..12e0c6104b9 100644 --- a/vendor/cloud.google.com/go/RELEASING.md +++ b/vendor/cloud.google.com/go/RELEASING.md @@ -1,4 +1,6 @@ -# Setup from scratch +# Releasing + +## Setup environment from scratch 1. [Install Go](https://golang.org/dl/). 1. Ensure that your `GOBIN` directory (by default `$(go env GOPATH)/bin`) @@ -19,7 +21,7 @@ 1. Fork the repo and add your fork as a secondary remote (this is necessary in order to create PRs). -# Which module to release? +## Determine which module to release The Go client libraries have several modules. Each module does not strictly correspond to a single library - they correspond to trees of directories. If a @@ -27,7 +29,7 @@ file needs to be released, you must release the closest ancestor module. To see all modules: -``` +```bash $ cat `find . -name go.mod` | grep module module cloud.google.com/go module cloud.google.com/go/bigtable @@ -53,18 +55,33 @@ of the `cloud.google.com/go` repository root module. Note: releasing `cloud.google.com/go` has no impact on any of the submodules, and vice-versa. They are released entirely independently. -# Test failures +## Test failures If there are any test failures in the Kokoro build, releases are blocked until the failures have been resolved. -# How to release `cloud.google.com/go` +## How to release `cloud.google.com/go` -1. Check for failures in the - [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any - failures in the most recent build, address them before proceeding with the - release. -1. Navigate to `~/code/gocloud/` and switch to master. +Check for failures in the [continuous Kokoro build](http://go/google-cloud-go-continuous). +If there are any failures in the most recent build, address them before +proceeding with the release. + +### Automated Release + +If there are changes that have not yet been released a +[pull request](https://github.com/googleapis/google-cloud-go/pull/2971) should +be automatically opened by [release-please](https://github.com/googleapis/release-please) +with a title like "chore: release 0.XX.0", where XX is the next version to be +released. 
To cut a release, approve and merge this pull request. Doing so will +update the `CHANGES.md`, tag the merged commit with the appropriate version, +and draft a GitHub release. + +### Manual Release + +If for whatever reason the automated release process is not working as expected, +here is how to manually cut a release. + +1. Navigate to `google-cloud-go/` and switch to master. 1. `git pull` 1. Run `git tag -l | grep -v beta | grep -v alpha` to see all existing releases. The current latest tag `$CV` is the largest tag. It should look something @@ -76,8 +93,11 @@ the failures have been resolved. (the `git log` is going to show you things in submodules, which are not going to be part of your release). 1. Edit `CHANGES.md` to include a summary of the changes. -1. `cd internal/version && go generate && cd -` -1. Commit the changes, push to your fork, and create a PR. +1. In `internal/version/version.go`, update `const Repo` to today's date with + the format `YYYYMMDD`. +1. In `internal/version` run `go generate`. +1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork, + and create a PR titled `chore: release $NV`. 1. Wait for the PR to be reviewed and merged. Once it's merged, and without merging any other PRs in the meantime: a. Switch to master. @@ -85,10 +105,10 @@ the failures have been resolved. c. Tag the repo with the next version: `git tag $NV`. d. Push the tag to origin: `git push origin $NV` -2. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) +1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) with the new release, copying the contents of `CHANGES.md`. -# How to release a submodule +## How to release a submodule We have several submodules, including `cloud.google.com/go/logging`, `cloud.google.com/go/datastore`, and so on. @@ -98,11 +118,11 @@ To release a submodule: (these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly) 1. Check for failures in the - [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any - failures in the most recent build, address them before proceeding with the - release. (This applies even if the failures are in a different submodule from the one - being released.) -1. Navigate to `~/code/gocloud/` and switch to master. + [continuous Kokoro build](http://go/google-cloud-go-continuous). If there are + any failures in the most recent build, address them before proceeding with + the release. (This applies even if the failures are in a different submodule + from the one being released.) +1. Navigate to `google-cloud-go/` and switch to master. 1. `git pull` 1. Run `git tag -l | grep datastore | grep -v beta | grep -v alpha` to see all existing releases. The current latest tag `$CV` is the largest tag. It @@ -111,8 +131,9 @@ To release a submodule: 1. On master, run `git log $CV.. -- datastore/` to list all the changes to the submodule directory since the last release. 1. Edit `datastore/CHANGES.md` to include a summary of the changes. -1. `cd internal/version && go generate && cd -` -1. Commit the changes, push to your fork, and create a PR. +1. In `internal/version` run `go generate`. +1. Commit the changes, ignoring the generated `.go-r` file. Push to your fork, + and create a PR titled `chore(datastore): release $NV`. 1. Wait for the PR to be reviewed and merged. Once it's merged, and without merging any other PRs in the meantime: a. Switch to master. @@ -123,6 +144,6 @@ To release a submodule: 1. 
Update [the releases page](https://github.com/googleapis/google-cloud-go/releases) with the new release, copying the contents of `datastore/CHANGES.md`. -# Appendix +## Appendix 1: This should get better as submodule tooling matures. diff --git a/vendor/cloud.google.com/go/go.mod b/vendor/cloud.google.com/go/go.mod index 0f950973604..45d251b5460 100644 --- a/vendor/cloud.google.com/go/go.mod +++ b/vendor/cloud.google.com/go/go.mod @@ -5,19 +5,19 @@ go 1.11 require ( cloud.google.com/go/storage v1.10.0 github.com/golang/mock v1.4.4 - github.com/golang/protobuf v1.4.2 + github.com/golang/protobuf v1.4.3 github.com/google/go-cmp v0.5.2 - github.com/google/martian/v3 v3.0.0 - github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7 + github.com/google/martian/v3 v3.1.0 + github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c github.com/googleapis/gax-go/v2 v2.0.5 github.com/jstemmer/go-junit-report v0.9.1 - go.opencensus.io v0.22.4 + go.opencensus.io v0.22.5 golang.org/x/lint v0.0.0-20200302205851-738671d3881b - golang.org/x/net v0.0.0-20200904194848-62affa334b73 + golang.org/x/net v0.0.0-20201031054903-ff519b6c9102 golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43 - golang.org/x/text v0.3.3 - golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c - google.golang.org/api v0.31.0 - google.golang.org/genproto v0.0.0-20200914193844-75d14daec038 - google.golang.org/grpc v1.31.1 + golang.org/x/text v0.3.4 + golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd + google.golang.org/api v0.35.0 + google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb + google.golang.org/grpc v1.33.2 ) diff --git a/vendor/cloud.google.com/go/go.sum b/vendor/cloud.google.com/go/go.sum index f8e9effe04d..3de6286e82d 100644 --- a/vendor/cloud.google.com/go/go.sum +++ b/vendor/cloud.google.com/go/go.sum @@ -101,6 +101,8 @@ github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0 github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= @@ -123,6 +125,8 @@ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPg github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0 h1:pMen7vLs8nvgEYhywH3KDWJIJTeEr2ULsVWHWYHQyBs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/martian/v3 v3.1.0 h1:wCKgOCHuUEVfsaQLpPSJb7VdYCdTVZQAuOdYm1yc/60= +github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57 h1:eqyIo2HjKhKe/mJzTG8n4VqvLXIOEG+SLdDqX7xGtkY= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f h1:Jnx61latede7zDD3DiiP4gmNz33uK0U5HDUaF0a/HVQ= @@ -133,9 +137,10 @@ 
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99 h1:Ak8CrdlwwXwAZxzS66vgPt4U8yUZX7JwLvVR58FN5jM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7 h1:k+KkMRk8mGOu1xG38StS7dQ+Z6oW1i9n3dgrAVU9Q/E= -github.com/google/pprof v0.0.0-20200905233945-acf8798be1f7/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c h1:Jx2lEv4nMccTJE+IIZOVIvk+DjNKlRsW0sm1uBr896U= +github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS0VQKc= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= @@ -146,6 +151,7 @@ github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+ github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024 h1:rBMNdlhTLzJjJSDIjNEXX1Pz3Hmwmz91v+zycvx9PJc= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= @@ -174,6 +180,8 @@ go.opencensus.io v0.22.3 h1:8sGtKOrtQqkN1bp2AtX+misvLIlOmsEsNd+9NIcPEm8= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4 h1:LYy1Hy3MJdrCdMwwzxA/dRok4ejH+RwNGbuoD9fCjto= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.5 h1:dntmOdLpSpHlVqbW5Eay97DelsZHe+55D+xC6i0dDS0= +go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -255,8 +263,10 @@ golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgN golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20200904194848-62affa334b73 h1:MXfv8rhZWmFeqX3GNZRsd6vOLoaCHjYEX3qkRo3YBUA= -golang.org/x/net 
v0.0.0-20200904194848-62affa334b73/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102 h1:42cLlJJdEh+ySyeUUbEQ5bsTiq8voBeTuweGVkY6Puw= +golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421 h1:Wo7BWFiOk0QRFMLYMqJGFMd9CgUAcGx7V+qEg/h5IBI= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -280,6 +290,8 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a h1:WXEvlFVvvGxCJLG6REjsT03i golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 h1:SQFwaSi55rU7vdNs9Yr0Z324VNlrF+0wMqRXT4St8ck= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -314,8 +326,10 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zr golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642 h1:B6caxRw+hozq68X2MY7jEpZh/cr4/aHLv9xU8Kkadrw= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200828194041-157a740278f4 h1:kCCpuwSAoYJPkNc6x0xT9yTtV4oKtARo4RGBQWOfg9E= -golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f h1:Fqb3ao1hUmOR3GkUOg/Y+BadLwykBIzs5q8Ez2SbHyc= +golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= @@ -324,6 +338,8 @@ golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod 
h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ= @@ -376,10 +392,9 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d h1:szSOL78iTCl0LF1AMjhSWJj8tIM0KixlUUnBtYXsmd8= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200828161849-5deb26317202 h1:DrWbY9UUFi/sl/3HkNVoBjDbGfIPZZfgoGsGxOL1EU8= -golang.org/x/tools v0.0.0-20200828161849-5deb26317202/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c h1:AQsh/7arPVFDBraQa8x7GoVnwnGg1kM7J2ySI0kF5WU= -golang.org/x/tools v0.0.0-20200915173823-2db8f0ff891c/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= +golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd h1:kJP9fbfkpUoA4y03Nxor8be+YbShcXP16fc7G4nlgpw= +golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= @@ -410,8 +425,8 @@ google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0 h1:yfrXXP61wVuLb0vBcG6qaOoIoqYEzOQS8jum51jkv2w= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.31.0 h1:1w5Sz/puhxFo9lTtip2n47k7toB/U2nCqOKNHd3Yrbo= -google.golang.org/api v0.31.0/go.mod h1:CL+9IBCa2WWU6gRuBWaKqGWLFFwbEUXkfeMkHLQWYWo= +google.golang.org/api v0.35.0 h1:TBCmTTxUrRDA1iTctnK/fIeitxIZ+TQuaf0j29fmCGo= +google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -460,10 +475,9 @@ google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c h1:Lq4llNryJoaVFRmvrIwC/ZHH7tNt4tUYIu8+se2aayY= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200831141814-d751682dd103 h1:z46CEPU+LlO0kGGwrH8h5epkkJhRZbAHYWOWD9JhLPI= -google.golang.org/genproto v0.0.0-20200831141814-d751682dd103/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200914193844-75d14daec038 
h1:SnvTpXhVDJGFxzZiHbMUZTh3VjU2Vx2feJ7Zfl5+OIY= -google.golang.org/genproto v0.0.0-20200914193844-75d14daec038/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb h1:MoNcrN5yaH+35Ge8RUwFbL7ekwq9ED2fiDpgWKrR29w= +google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= @@ -485,6 +499,8 @@ google.golang.org/grpc v1.31.0 h1:T7P4R73V3SSDPhH7WW7ATbfViLtmamH0DKrP3f9AuDI= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1 h1:SfXqXS5hkufcdZ/mHtYCh53P2b+92WQq/DZcKLgsFRs= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.2 h1:EQyQC3sa8M+p6Ulc8yy9SWSS2GVwyRc83gAbG8lrl4o= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json index acf6a38aef5..b39a53da9ab 100644 --- a/vendor/cloud.google.com/go/internal/.repo-metadata-full.json +++ b/vendor/cloud.google.com/go/internal/.repo-metadata-full.json @@ -1,4 +1,12 @@ { + "cloud.google.com/go/accessapproval/apiv1": { + "distribution_name": "cloud.google.com/go/accessapproval/apiv1", + "description": "", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/accessapproval/apiv1", + "release_level": "beta" + }, "cloud.google.com/go/analytics/admin/apiv1alpha": { "distribution_name": "cloud.google.com/go/analytics/admin/apiv1alpha", "description": "", @@ -55,6 +63,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1p5beta1", "release_level": "beta" }, + "cloud.google.com/go/assuredworkloads/apiv1beta1": { + "distribution_name": "cloud.google.com/go/assuredworkloads/apiv1beta1", + "description": "", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/assuredworkloads/apiv1beta1", + "release_level": "beta" + }, "cloud.google.com/go/automl/apiv1": { "distribution_name": "cloud.google.com/go/automl/apiv1", "description": "Cloud AutoML API", @@ -167,6 +183,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/apiv1", "release_level": "ga" }, + "cloud.google.com/go/billing/budgets/apiv1": { + "distribution_name": "cloud.google.com/go/billing/budgets/apiv1", + "description": "Cloud Billing Budget API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/billing/budgets/apiv1", + "release_level": "beta" + }, "cloud.google.com/go/billing/budgets/apiv1beta1": { "distribution_name": "cloud.google.com/go/billing/budgets/apiv1beta1", "description": "", 
@@ -287,6 +311,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/dialogflow/apiv2", "release_level": "ga" }, + "cloud.google.com/go/dialogflow/cx/apiv3beta1": { + "distribution_name": "cloud.google.com/go/dialogflow/cx/apiv3beta1", + "description": "Dialogflow API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/dialogflow/cx/apiv3beta1", + "release_level": "beta" + }, "cloud.google.com/go/dlp/apiv2": { "distribution_name": "cloud.google.com/go/dlp/apiv2", "description": "Cloud Data Loss Prevention (DLP) API", @@ -297,7 +329,7 @@ }, "cloud.google.com/go/errorreporting": { "distribution_name": "cloud.google.com/go/errorreporting", - "description": "Stackdriver Error Reporting API", + "description": "Cloud Error Reporting API", "language": "Go", "client_library_type": "manual", "docs_url": "https://pkg.go.dev/cloud.google.com/go/errorreporting", @@ -305,7 +337,7 @@ }, "cloud.google.com/go/errorreporting/apiv1beta1": { "distribution_name": "cloud.google.com/go/errorreporting/apiv1beta1", - "description": "Stackdriver Error Reporting API", + "description": "Cloud Error Reporting API", "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/errorreporting/apiv1beta1", @@ -341,7 +373,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/functions/apiv1", - "release_level": "beta" + "release_level": "ga" }, "cloud.google.com/go/gaming/apiv1": { "distribution_name": "cloud.google.com/go/gaming/apiv1", @@ -349,7 +381,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/gaming/apiv1", - "release_level": "beta" + "release_level": "ga" }, "cloud.google.com/go/gaming/apiv1beta": { "distribution_name": "cloud.google.com/go/gaming/apiv1beta", @@ -409,7 +441,7 @@ }, "cloud.google.com/go/logging": { "distribution_name": "cloud.google.com/go/logging", - "description": "Stackdriver Logging API", + "description": "Cloud Logging API", "language": "Go", "client_library_type": "manual", "docs_url": "https://pkg.go.dev/cloud.google.com/go/logging", @@ -431,6 +463,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/longrunning/autogen", "release_level": "alpha" }, + "cloud.google.com/go/managedidentities/apiv1": { + "distribution_name": "cloud.google.com/go/managedidentities/apiv1", + "description": "Managed Service for Microsoft Active Directory API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/managedidentities/apiv1", + "release_level": "beta" + }, "cloud.google.com/go/memcache/apiv1beta2": { "distribution_name": "cloud.google.com/go/memcache/apiv1beta2", "description": "Cloud Memorystore for Memcached API", @@ -453,7 +493,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/monitoring/dashboard/apiv1", - "release_level": "beta" + "release_level": "ga" }, "cloud.google.com/go/notebooks/apiv1beta1": { "distribution_name": "cloud.google.com/go/notebooks/apiv1beta1", @@ -525,7 +565,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/policytroubleshooter/apiv1", - "release_level": "beta" + "release_level": "ga" }, "cloud.google.com/go/profiler": { "distribution_name": "cloud.google.com/go/profiler", @@ -557,7 +597,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": 
"https://pkg.go.dev/cloud.google.com/go/pubsublite/apiv1", - "release_level": "beta" + "release_level": "ga" }, "cloud.google.com/go/recaptchaenterprise/apiv1": { "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1", @@ -565,7 +605,7 @@ "language": "Go", "client_library_type": "generated", "docs_url": "https://pkg.go.dev/cloud.google.com/go/recaptchaenterprise/apiv1", - "release_level": "beta" + "release_level": "ga" }, "cloud.google.com/go/recaptchaenterprise/apiv1beta1": { "distribution_name": "cloud.google.com/go/recaptchaenterprise/apiv1beta1", @@ -751,6 +791,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/storage", "release_level": "ga" }, + "cloud.google.com/go/talent/apiv4": { + "distribution_name": "cloud.google.com/go/talent/apiv4", + "description": "", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/talent/apiv4", + "release_level": "beta" + }, "cloud.google.com/go/talent/apiv4beta1": { "distribution_name": "cloud.google.com/go/talent/apiv4beta1", "description": "Cloud Talent Solution API", @@ -855,6 +903,14 @@ "docs_url": "https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1beta1", "release_level": "beta" }, + "cloud.google.com/go/websecurityscanner/apiv1": { + "distribution_name": "cloud.google.com/go/websecurityscanner/apiv1", + "description": "Web Security Scanner API", + "language": "Go", + "client_library_type": "generated", + "docs_url": "https://pkg.go.dev/cloud.google.com/go/websecurityscanner/apiv1", + "release_level": "beta" + }, "cloud.google.com/go/workflows/apiv1beta": { "distribution_name": "cloud.google.com/go/workflows/apiv1beta", "description": "", diff --git a/vendor/cloud.google.com/go/internal/version/version.go b/vendor/cloud.google.com/go/internal/version/version.go index 3774b76d6a2..fd9dd91e985 100644 --- a/vendor/cloud.google.com/go/internal/version/version.go +++ b/vendor/cloud.google.com/go/internal/version/version.go @@ -26,7 +26,7 @@ import ( // Repo is the current version of the client libraries in this // repo. It should be a date in YYYYMMDD format. -const Repo = "20200911" +const Repo = "20201104" // Go returns the Go runtime version. The returned string // has no whitespace. 
diff --git a/vendor/cloud.google.com/go/longrunning/autogen/doc.go b/vendor/cloud.google.com/go/longrunning/autogen/doc.go index 678e4d3f9c0..a8a6eaa153e 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/doc.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/doc.go @@ -48,7 +48,7 @@ import ( type clientHookParams struct{} type clientHook func(context.Context, clientHookParams) ([]option.ClientOption, error) -const versionClient = "20200912" +const versionClient = "20201110" func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context { out, _ := metadata.FromOutgoingContext(ctx) diff --git a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go index b361c780284..28cdab4b827 100644 --- a/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go +++ b/vendor/cloud.google.com/go/longrunning/autogen/operations_client.go @@ -27,6 +27,7 @@ import ( gax "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" "google.golang.org/api/option" + "google.golang.org/api/option/internaloption" gtransport "google.golang.org/api/transport/grpc" longrunningpb "google.golang.org/genproto/googleapis/longrunning" "google.golang.org/grpc" @@ -47,7 +48,8 @@ type OperationsCallOptions struct { func defaultOperationsClientOptions() []option.ClientOption { return []option.ClientOption{ - option.WithEndpoint("longrunning.googleapis.com:443"), + internaloption.WithDefaultEndpoint("longrunning.googleapis.com:443"), + internaloption.WithDefaultMTLSEndpoint("longrunning.mtls.googleapis.com:443"), option.WithGRPCDialOption(grpc.WithDisableServiceConfig()), option.WithScopes(DefaultAuthScopes()...), option.WithGRPCDialOption(grpc.WithDefaultCallOptions( diff --git a/vendor/cloud.google.com/go/testing.md b/vendor/cloud.google.com/go/testing.md new file mode 100644 index 00000000000..03867d561af --- /dev/null +++ b/vendor/cloud.google.com/go/testing.md @@ -0,0 +1,236 @@ +# Testing Code that depends on Go Client Libraries + +The Go client libraries generated as a part of `cloud.google.com/go` all take +the approach of returning concrete types instead of interfaces. That way, new +fields and methods can be added to the libraries without breaking users. This +document will go over some patterns that can be used to test code that depends +on the Go client libraries. + +## Testing gRPC services using fakes + +*Note*: You can see the full +[example code using a fake here](https://github.com/googleapis/google-cloud-go/tree/master/internal/examples/fake). + +The clients found in `cloud.google.com/go` are gRPC based, with a couple of +notable exceptions being the [`storage`](https://pkg.go.dev/cloud.google.com/go/storage) +and [`bigquery`](https://pkg.go.dev/cloud.google.com/go/bigquery) clients. +Interactions with gRPC services can be faked by serving up your own in-memory +server within your test. One benefit of using this approach is that you don’t +need to define an interface in your runtime code; you can keep using +concrete struct types. You instead define a fake server in your test code. 
For
+example, take a look at the following function:
+
+```go
+import (
+	"context"
+	"fmt"
+	"log"
+	"os"
+
+	translate "cloud.google.com/go/translate/apiv3"
+	"github.com/googleapis/gax-go/v2"
+	translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
+)
+
+func TranslateTextWithConcreteClient(client *translate.TranslationClient, text string, targetLang string) (string, error) {
+	ctx := context.Background()
+	log.Printf("Translating %q to %q", text, targetLang)
+	req := &translatepb.TranslateTextRequest{
+		Parent:             fmt.Sprintf("projects/%s/locations/global", os.Getenv("GOOGLE_CLOUD_PROJECT")),
+		TargetLanguageCode: "en-US",
+		Contents:           []string{text},
+	}
+	resp, err := client.TranslateText(ctx, req)
+	if err != nil {
+		return "", fmt.Errorf("unable to translate text: %v", err)
+	}
+	translations := resp.GetTranslations()
+	if len(translations) != 1 {
+		return "", fmt.Errorf("expected only one result, got %d", len(translations))
+	}
+	return translations[0].TranslatedText, nil
+}
+```
+
+Here is an example of what a fake server implementation would look like for
+faking the interactions above:
+
+```go
+import (
+	"context"
+
+	translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
+)
+
+type fakeTranslationServer struct {
+	translatepb.UnimplementedTranslationServiceServer
+}
+
+func (f *fakeTranslationServer) TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest) (*translatepb.TranslateTextResponse, error) {
+	resp := &translatepb.TranslateTextResponse{
+		Translations: []*translatepb.Translation{
+			&translatepb.Translation{
+				TranslatedText: "Hello World",
+			},
+		},
+	}
+	return resp, nil
+}
+```
+
+All of the generated protobuf code found in [google.golang.org/genproto](https://pkg.go.dev/google.golang.org/genproto)
+contains a similar `package.UnimplementedFooServer` type that is useful for
+creating fakes. By embedding the unimplemented server in the
+`fakeTranslationServer`, the fake will “inherit” all of the RPCs the server
+exposes. Then, by providing our own `fakeTranslationServer.TranslateText`
+method you can “override” the default unimplemented behavior of the one RPC that
+you would like to be faked.
+
+The test itself does require a little bit of setup: start up a `net.Listener`,
+register the server, and tell the client library to call the server:
+
+```go
+import (
+	"context"
+	"net"
+	"testing"
+
+	translate "cloud.google.com/go/translate/apiv3"
+	"google.golang.org/api/option"
+	translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3"
+	"google.golang.org/grpc"
+)
+
+func TestTranslateTextWithConcreteClient(t *testing.T) {
+	ctx := context.Background()
+
+	// Setup the fake server.
+	fakeTranslationServer := &fakeTranslationServer{}
+	l, err := net.Listen("tcp", "localhost:0")
+	if err != nil {
+		t.Fatal(err)
+	}
+	gsrv := grpc.NewServer()
+	translatepb.RegisterTranslationServiceServer(gsrv, fakeTranslationServer)
+	fakeServerAddr := l.Addr().String()
+	go func() {
+		if err := gsrv.Serve(l); err != nil {
+			panic(err)
+		}
+	}()
+
+	// Create a client.
+	client, err := translate.NewTranslationClient(ctx,
+		option.WithEndpoint(fakeServerAddr),
+		option.WithoutAuthentication(),
+		option.WithGRPCDialOption(grpc.WithInsecure()),
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Run the test.
+ text, err := TranslateTextWithConcreteClient(client, "Hola Mundo", "en-US") + if err != nil { + t.Fatal(err) + } + if text != "Hello World" { + t.Fatalf("got %q, want Hello World", text) + } +} +``` + +## Testing using mocks + +*Note*: You can see the full +[example code using a mock here](https://github.com/googleapis/google-cloud-go/tree/master/internal/examples/mock). + +When mocking code you need to work with interfaces. Let’s create an interface +for the `cloud.google.com/go/translate/apiv3` client used in the +`TranslateTextWithConcreteClient` function mentioned in the previous section. +The `translate.Client` has over a dozen methods but this code only uses one of +them. Here is an interface that satisfies the interactions of the +`translate.Client` in this function. + +```go +type TranslationClient interface { + TranslateText(ctx context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error) +} +``` + +Now that we have an interface that satisfies the method being used we can +rewrite the function signature to take the interface instead of the concrete +type. + +```go +func TranslateTextWithInterfaceClient(client TranslationClient, text string, targetLang string) (string, error) { +// ... +} +``` + +This allows a real `translate.Client` to be passed to the method in production +and for a mock implementation to be passed in during testing. This pattern can +be applied to any Go code, not just `cloud.google.com/go`. This is because +interfaces in Go are implicitly satisfied. Structs in the client libraries can +implicitly implement interfaces defined in your codebase. Let’s take a look at +what it might look like to define a lightweight mock for the `TranslationClient` +interface. + +```go +import ( + "context" + "testing" + + "github.com/googleapis/gax-go/v2" + translatepb "google.golang.org/genproto/googleapis/cloud/translate/v3" +) + +type mockClient struct{} + +func (*mockClient) TranslateText(_ context.Context, req *translatepb.TranslateTextRequest, opts ...gax.CallOption) (*translatepb.TranslateTextResponse, error) { + resp := &translatepb.TranslateTextResponse{ + Translations: []*translatepb.Translation{ + &translatepb.Translation{ + TranslatedText: "Hello World", + }, + }, + } + return resp, nil +} + +func TestTranslateTextWithAbstractClient(t *testing.T) { + client := &mockClient{} + text, err := TranslateTextWithInterfaceClient(client, "Hola Mundo", "en-US") + if err != nil { + t.Fatal(err) + } + if text != "Hello World" { + t.Fatalf("got %q, want Hello World", text) + } +} +``` + +If you prefer to not write your own mocks there are mocking frameworks such as +[golang/mock](https://github.com/golang/mock) which can generate mocks for you +from an interface. As a word of caution though, try to not +[overuse mocks](https://testing.googleblog.com/2013/05/testing-on-toilet-dont-overuse-mocks.html). + +## Testing using emulators + +Some of the client libraries provided in `cloud.google.com/go` support running +against a service emulator. The concept is similar to that of using fakes, +mentioned above, but the server is managed for you. You just need to start it up +and instruct the client library to talk to the emulator by setting a service +specific emulator environment variable. 
Current services/environment-variables +are: + +- bigtable: `BIGTABLE_EMULATOR_HOST` +- datastore: `DATASTORE_EMULATOR_HOST` +- firestore: `FIRESTORE_EMULATOR_HOST` +- pubsub: `PUBSUB_EMULATOR_HOST` +- spanner: `SPANNER_EMULATOR_HOST` +- storage: `STORAGE_EMULATOR_HOST` + - Although the storage client supports an emulator environment variable there is no official emulator provided by gcloud. + +For more information on emulators please refer to the +[gcloud documentation](https://cloud.google.com/sdk/gcloud/reference/beta/emulators). diff --git a/vendor/github.com/alecthomas/units/go.mod b/vendor/github.com/alecthomas/units/go.mod index c7fb91f2b27..f77ddea73a9 100644 --- a/vendor/github.com/alecthomas/units/go.mod +++ b/vendor/github.com/alecthomas/units/go.mod @@ -1,3 +1,5 @@ module github.com/alecthomas/units +go 1.15 + require github.com/stretchr/testify v1.4.0 diff --git a/vendor/github.com/armon/go-metrics/.gitignore b/vendor/github.com/armon/go-metrics/.gitignore index 8c03ec112a4..e5750f5720e 100644 --- a/vendor/github.com/armon/go-metrics/.gitignore +++ b/vendor/github.com/armon/go-metrics/.gitignore @@ -22,3 +22,5 @@ _testmain.go *.exe /metrics.out + +.idea diff --git a/vendor/github.com/armon/go-metrics/metrics.go b/vendor/github.com/armon/go-metrics/metrics.go index 457b74bb53c..6753b13bb28 100644 --- a/vendor/github.com/armon/go-metrics/metrics.go +++ b/vendor/github.com/armon/go-metrics/metrics.go @@ -228,12 +228,12 @@ func (m *Metrics) allowMetric(key []string, labels []Label) (bool, []Label) { func (m *Metrics) collectStats() { for { time.Sleep(m.ProfileInterval) - m.emitRuntimeStats() + m.EmitRuntimeStats() } } // Emits various runtime statsitics -func (m *Metrics) emitRuntimeStats() { +func (m *Metrics) EmitRuntimeStats() { // Export number of Goroutines numRoutines := runtime.NumGoroutine() m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) diff --git a/vendor/github.com/armon/go-metrics/prometheus/prometheus.go b/vendor/github.com/armon/go-metrics/prometheus/prometheus.go index 8a23424fbec..1dcf53053a6 100644 --- a/vendor/github.com/armon/go-metrics/prometheus/prometheus.go +++ b/vendor/github.com/armon/go-metrics/prometheus/prometheus.go @@ -5,12 +5,12 @@ package prometheus import ( "fmt" "log" + "math" + "regexp" "strings" "sync" "time" - "regexp" - "github.com/armon/go-metrics" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/push" @@ -29,6 +29,27 @@ type PrometheusOpts struct { // Expiration is the duration a metric is valid for, after which it will be // untracked. If the value is zero, a metric is never expired. Expiration time.Duration + Registerer prometheus.Registerer + + // Gauges, Summaries, and Counters allow us to pre-declare metrics by giving their Name, Help, and ConstLabels to + // the PrometheusSink when it is created. Metrics declared in this way will be initialized at zero and will not be + // deleted when their expiry is reached. + // - Gauges and Summaries will be set to NaN when they expire. + // - Counters continue to Collect their last known value. 
+ // Ex: + // PrometheusOpts{ + // Expiration: 10 * time.Second, + // Gauges: []GaugeDefinition{ + // { + // Name: []string{ "application", "component", "measurement"}, + // Help: "application_component_measurement provides an example of how to declare static metrics", + // ConstLabels: []metrics.Label{ { Name: "my_label", Value: "does_not_change" }, }, + // }, + // }, + // } + GaugeDefinitions []GaugeDefinition + SummaryDefinitions []SummaryDefinition + CounterDefinitions []CounterDefinition } type PrometheusSink struct { @@ -36,8 +57,48 @@ type PrometheusSink struct { gauges sync.Map summaries sync.Map counters sync.Map - updates sync.Map expiration time.Duration + help map[string]string +} + +// GaugeDefinition can be provided to PrometheusOpts to declare a constant gauge that is not deleted on expiry. +type GaugeDefinition struct { + Name []string + ConstLabels []metrics.Label + Help string +} + +type gauge struct { + prometheus.Gauge + updatedAt time.Time + // canDelete is set if the metric is created during runtime so we know it's ephemeral and can delete it on expiry. + canDelete bool +} + +// SummaryDefinition can be provided to PrometheusOpts to declare a constant summary that is not deleted on expiry. +type SummaryDefinition struct { + Name []string + ConstLabels []metrics.Label + Help string +} + +type summary struct { + prometheus.Summary + updatedAt time.Time + canDelete bool +} + +// CounterDefinition can be provided to PrometheusOpts to declare a constant counter that is not deleted on expiry. +type CounterDefinition struct { + Name []string + ConstLabels []metrics.Label + Help string +} + +type counter struct { + prometheus.Counter + updatedAt time.Time + canDelete bool } // NewPrometheusSink creates a new PrometheusSink using the default options. @@ -51,11 +112,20 @@ func NewPrometheusSinkFrom(opts PrometheusOpts) (*PrometheusSink, error) { gauges: sync.Map{}, summaries: sync.Map{}, counters: sync.Map{}, - updates: sync.Map{}, expiration: opts.Expiration, + help: make(map[string]string), + } + + initGauges(&sink.gauges, opts.GaugeDefinitions, sink.help) + initSummaries(&sink.summaries, opts.SummaryDefinitions, sink.help) + initCounters(&sink.counters, opts.CounterDefinitions, sink.help) + + reg := opts.Registerer + if reg == nil { + reg = prometheus.DefaultRegisterer } - return sink, prometheus.Register(sink) + return sink, reg.Register(sink) } // Describe is needed to meet the Collector interface. @@ -72,40 +142,104 @@ func (p *PrometheusSink) Collect(c chan<- prometheus.Metric) { expire := p.expiration != 0 now := time.Now() p.gauges.Range(func(k, v interface{}) bool { - last, _ := p.updates.Load(k) - if expire && last.(time.Time).Add(p.expiration).Before(now) { - p.updates.Delete(k) - p.gauges.Delete(k) - } else { - v.(prometheus.Gauge).Collect(c) + if v == nil { + return true } + g := v.(*gauge) + lastUpdate := g.updatedAt + if expire && lastUpdate.Add(p.expiration).Before(now) { + if g.canDelete { + p.gauges.Delete(k) + return true + } + // We have not observed the gauge this interval so we don't know its value. 
+ g.Set(math.NaN()) + } + g.Collect(c) return true }) p.summaries.Range(func(k, v interface{}) bool { - last, _ := p.updates.Load(k) - if expire && last.(time.Time).Add(p.expiration).Before(now) { - p.updates.Delete(k) - p.summaries.Delete(k) - } else { - v.(prometheus.Summary).Collect(c) + if v == nil { + return true + } + s := v.(*summary) + lastUpdate := s.updatedAt + if expire && lastUpdate.Add(p.expiration).Before(now) { + if s.canDelete { + p.summaries.Delete(k) + return true + } + // We have observed nothing in this interval. + s.Observe(math.NaN()) } + s.Collect(c) return true }) p.counters.Range(func(k, v interface{}) bool { - last, _ := p.updates.Load(k) - if expire && last.(time.Time).Add(p.expiration).Before(now) { - p.updates.Delete(k) - p.counters.Delete(k) - } else { - v.(prometheus.Counter).Collect(c) + if v == nil { + return true + } + count := v.(*counter) + lastUpdate := count.updatedAt + if expire && lastUpdate.Add(p.expiration).Before(now) { + if count.canDelete { + p.counters.Delete(k) + return true + } + // Counters remain at their previous value when not observed, so we do not set it to NaN. } + count.Collect(c) return true }) } +func initGauges(m *sync.Map, gauges []GaugeDefinition, help map[string]string) { + for _, g := range gauges { + key, hash := flattenKey(g.Name, g.ConstLabels) + help[fmt.Sprintf("gauge.%s", key)] = g.Help + pG := prometheus.NewGauge(prometheus.GaugeOpts{ + Name: key, + Help: g.Help, + ConstLabels: prometheusLabels(g.ConstLabels), + }) + m.Store(hash, &gauge{Gauge: pG}) + } + return +} + +func initSummaries(m *sync.Map, summaries []SummaryDefinition, help map[string]string) { + for _, s := range summaries { + key, hash := flattenKey(s.Name, s.ConstLabels) + help[fmt.Sprintf("summary.%s", key)] = s.Help + pS := prometheus.NewSummary(prometheus.SummaryOpts{ + Name: key, + Help: s.Help, + MaxAge: 10 * time.Second, + ConstLabels: prometheusLabels(s.ConstLabels), + Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, + }) + m.Store(hash, &summary{Summary: pS}) + } + return +} + +func initCounters(m *sync.Map, counters []CounterDefinition, help map[string]string) { + for _, c := range counters { + key, hash := flattenKey(c.Name, c.ConstLabels) + help[fmt.Sprintf("counter.%s", key)] = c.Help + pC := prometheus.NewCounter(prometheus.CounterOpts{ + Name: key, + Help: c.Help, + ConstLabels: prometheusLabels(c.ConstLabels), + }) + m.Store(hash, &counter{Counter: pC}) + } + return +} + var forbiddenChars = regexp.MustCompile("[ .=\\-/]") -func (p *PrometheusSink) flattenKey(parts []string, labels []metrics.Label) (string, string) { +func flattenKey(parts []string, labels []metrics.Label) (string, string) { key := strings.Join(parts, "_") key = forbiddenChars.ReplaceAllString(key, "_") @@ -130,18 +264,41 @@ func (p *PrometheusSink) SetGauge(parts []string, val float32) { } func (p *PrometheusSink) SetGaugeWithLabels(parts []string, val float32, labels []metrics.Label) { - key, hash := p.flattenKey(parts, labels) - g, ok := p.gauges.Load(hash) - if !ok { - g = prometheus.NewGauge(prometheus.GaugeOpts{ + key, hash := flattenKey(parts, labels) + pg, ok := p.gauges.Load(hash) + + // The sync.Map underlying gauges stores pointers to our structs. If we need to make updates, + // rather than modifying the underlying value directly, which would be racy, we make a local + // copy by dereferencing the pointer we get back, making the appropriate changes, and then + // storing a pointer to our local copy. 
The underlying Prometheus types are threadsafe,
	// so there are no issues there. It's possible for racy updates to occur to the updatedAt
+	// value, but since we're always setting it to time.Now(), it doesn't really matter.
+	if ok {
+		localGauge := *pg.(*gauge)
+		localGauge.Set(float64(val))
+		localGauge.updatedAt = time.Now()
+		p.gauges.Store(hash, &localGauge)
+
+		// The gauge does not exist; create it and allow it to be deleted
+	} else {
+		help := key
+		existingHelp, ok := p.help[fmt.Sprintf("gauge.%s", key)]
+		if ok {
+			help = existingHelp
+		}
+		g := prometheus.NewGauge(prometheus.GaugeOpts{
 			Name:        key,
-			Help:        key,
+			Help:        help,
 			ConstLabels: prometheusLabels(labels),
 		})
-		p.gauges.Store(hash, g)
+		g.Set(float64(val))
+		pg = &gauge{
+			Gauge:     g,
+			updatedAt: time.Now(),
+			canDelete: true,
+		}
+		p.gauges.Store(hash, pg)
 	}
-	g.(prometheus.Gauge).Set(float64(val))
-	p.updates.Store(hash, time.Now())
 }
 
 func (p *PrometheusSink) AddSample(parts []string, val float32) {
@@ -149,20 +306,38 @@ func (p *PrometheusSink) AddSample(parts []string, val float32) {
 }
 
 func (p *PrometheusSink) AddSampleWithLabels(parts []string, val float32, labels []metrics.Label) {
-	key, hash := p.flattenKey(parts, labels)
-	g, ok := p.summaries.Load(hash)
-	if !ok {
-		g = prometheus.NewSummary(prometheus.SummaryOpts{
+	key, hash := flattenKey(parts, labels)
+	ps, ok := p.summaries.Load(hash)
+
+	// Does the summary already exist for this sample type?
+	if ok {
+		localSummary := *ps.(*summary)
+		localSummary.Observe(float64(val))
+		localSummary.updatedAt = time.Now()
+		p.summaries.Store(hash, &localSummary)
+
+		// The summary does not exist; create it and allow it to be deleted
+	} else {
+		help := key
+		existingHelp, ok := p.help[fmt.Sprintf("summary.%s", key)]
+		if ok {
+			help = existingHelp
+		}
+		s := prometheus.NewSummary(prometheus.SummaryOpts{
 			Name:        key,
-			Help:        key,
+			Help:        help,
 			MaxAge:      10 * time.Second,
 			ConstLabels: prometheusLabels(labels),
 			Objectives:  map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001},
 		})
-		p.summaries.Store(hash, g)
+		s.Observe(float64(val))
+		ps = &summary{
+			Summary:   s,
+			updatedAt: time.Now(),
+			canDelete: true,
+		}
+		p.summaries.Store(hash, ps)
 	}
-	g.(prometheus.Summary).Observe(float64(val))
-	p.updates.Store(hash, time.Now())
 }
 
 // EmitKey is not implemented. Prometheus doesn’t offer a type for which an
@@ -176,20 +351,40 @@ func (p *PrometheusSink) IncrCounter(parts []string, val float32) {
 }
 
 func (p *PrometheusSink) IncrCounterWithLabels(parts []string, val float32, labels []metrics.Label) {
-	key, hash := p.flattenKey(parts, labels)
-	g, ok := p.counters.Load(hash)
-	if !ok {
-		g = prometheus.NewCounter(prometheus.CounterOpts{
+	key, hash := flattenKey(parts, labels)
+	pc, ok := p.counters.Load(hash)
+
+	// Does the counter exist?
+	if ok {
+		localCounter := *pc.(*counter)
+		localCounter.Add(float64(val))
+		localCounter.updatedAt = time.Now()
+		p.counters.Store(hash, &localCounter)
+
+		// The counter does not exist yet; create it and allow it to be deleted
+	} else {
+		help := key
+		existingHelp, ok := p.help[fmt.Sprintf("counter.%s", key)]
+		if ok {
+			help = existingHelp
+		}
+		c := prometheus.NewCounter(prometheus.CounterOpts{
 			Name:        key,
-			Help:        key,
+			Help:        help,
 			ConstLabels: prometheusLabels(labels),
 		})
-		p.counters.Store(hash, g)
+		c.Add(float64(val))
+		pc = &counter{
+			Counter:   c,
+			updatedAt: time.Now(),
+			canDelete: true,
+		}
+		p.counters.Store(hash, pc)
 	}
-	g.(prometheus.Counter).Add(float64(val))
-	p.updates.Store(hash, time.Now())
 }
 
+// PrometheusPushSink wraps a normal prometheus sink and provides facilities to export it to an
+// address on an interval.
 type PrometheusPushSink struct {
 	*PrometheusSink
 	pusher   *push.Pusher
@@ -198,13 +393,12 @@ type PrometheusPushSink struct {
 	stopChan chan struct{}
 }
 
-func NewPrometheusPushSink(address string, pushIterval time.Duration, name string) (*PrometheusPushSink, error) {
-
+// NewPrometheusPushSink creates a PrometheusPushSink by taking an address, interval, and destination name.
+func NewPrometheusPushSink(address string, pushInterval time.Duration, name string) (*PrometheusPushSink, error) {
 	promSink := &PrometheusSink{
 		gauges:     sync.Map{},
 		summaries:  sync.Map{},
 		counters:   sync.Map{},
-		updates:    sync.Map{},
 		expiration: 60 * time.Second,
 	}
 
@@ -214,7 +408,7 @@ func NewPrometheusPushSink(address string, pushIterval time.Duration, name strin
 		promSink,
 		pusher,
 		address,
-		pushIterval,
+		pushInterval,
 		make(chan struct{}),
 	}
 
diff --git a/vendor/github.com/armon/go-metrics/start.go b/vendor/github.com/armon/go-metrics/start.go
index 32a28c48378..6aa0bd389aa 100644
--- a/vendor/github.com/armon/go-metrics/start.go
+++ b/vendor/github.com/armon/go-metrics/start.go
@@ -6,7 +6,7 @@ import (
 	"sync/atomic"
 	"time"
 
-	"github.com/hashicorp/go-immutable-radix"
+	iradix "github.com/hashicorp/go-immutable-radix"
 )
 
 // Config is used to configure metrics settings
@@ -48,6 +48,11 @@ func init() {
 	globalMetrics.Store(&Metrics{sink: &BlackholeSink{}})
 }
 
+// Default returns the shared global metrics instance.
+func Default() *Metrics { + return globalMetrics.Load().(*Metrics) +} + // DefaultConfig provides a sane default configuration func DefaultConfig(serviceName string) *Config { c := &Config{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 1b447aea580..35db88d995c 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -344,6 +344,20 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "airflow": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "api.detective": service{ Defaults: endpoint{ Protocols: []string{"https"}, @@ -664,6 +678,18 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "app-integrations": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "appflow": service{ Endpoints: endpoints{ @@ -801,12 +827,36 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "athena-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "athena-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "athena-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "athena-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "autoscaling": service{ @@ -966,8 +1016,7 @@ var awsPartition = partition{ PartitionEndpoint: "aws-global", IsRegionalized: boxedFalse, Defaults: endpoint{ - SSLCommonName: "service.chime.aws.amazon.com", - Protocols: []string{"https"}, + Protocols: []string{"https"}, }, Endpoints: endpoints{ "aws-global": endpoint{ @@ -1206,6 +1255,7 @@ var awsPartition = partition{ "codebuild": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1330,9 +1380,25 @@ var awsPartition = partition{ }, }, }, + "codeguru-reviewer": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, + "eu-west-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "codepipeline": service{ Endpoints: endpoints{ + "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, @@ -1430,8 +1496,10 
@@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "fips-us-east-1": endpoint{ Hostname: "cognito-identity-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1450,8 +1518,10 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1465,8 +1535,10 @@ var awsPartition = partition{ "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, + "eu-north-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "fips-us-east-1": endpoint{ Hostname: "cognito-idp-fips.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -1485,8 +1557,10 @@ var awsPartition = partition{ Region: "us-west-2", }, }, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, + "us-west-1": endpoint{}, "us-west-2": endpoint{}, }, }, @@ -1576,6 +1650,7 @@ var awsPartition = partition{ "config": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -1585,15 +1660,40 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "config-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "config-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "config-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "config-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "connect": service{ @@ -1608,6 +1708,14 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "contact-lens": service{ + + Endpoints: endpoints{ + "ap-southeast-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "cur": service{ Endpoints: endpoints{ @@ -1888,6 +1996,12 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + "sa-east-1": endpoint{ + Hostname: "rds.sa-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "sa-east-1", + }, + }, "us-east-1": endpoint{ Hostname: "rds.us-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2612,6 +2726,14 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "emr-containers": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "entitlement.marketplace": service{ Defaults: endpoint{ CredentialScope: credentialScope{ @@ -2770,6 +2892,18 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": 
endpoint{}, + "fips-af-south-1": endpoint{ + Hostname: "fms-fips.af-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "af-south-1", + }, + }, + "fips-ap-east-1": endpoint{ + Hostname: "fms-fips.ap-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "ap-east-1", + }, + }, "fips-ap-northeast-1": endpoint{ Hostname: "fms-fips.ap-northeast-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2812,6 +2946,12 @@ var awsPartition = partition{ Region: "eu-central-1", }, }, + "fips-eu-south-1": endpoint{ + Hostname: "fms-fips.eu-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-south-1", + }, + }, "fips-eu-west-1": endpoint{ Hostname: "fms-fips.eu-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -2830,6 +2970,12 @@ var awsPartition = partition{ Region: "eu-west-3", }, }, + "fips-me-south-1": endpoint{ + Hostname: "fms-fips.me-south-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "me-south-1", + }, + }, "fips-sa-east-1": endpoint{ Hostname: "fms-fips.sa-east-1.amazonaws.com", CredentialScope: credentialScope{ @@ -3151,6 +3297,14 @@ var awsPartition = partition{ }, }, }, + "healthlake": service{ + Defaults: endpoint{ + Protocols: []string{"https"}, + }, + Endpoints: endpoints{ + "us-east-1": endpoint{}, + }, + }, "honeycode": service{ Endpoints: endpoints{ @@ -3407,6 +3561,23 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "iotwireless": service{ + + Endpoints: endpoints{ + "eu-west-1": endpoint{ + Hostname: "api.iotwireless.eu-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "eu-west-1", + }, + }, + "us-east-1": endpoint{ + Hostname: "api.iotwireless.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + }, + }, "kafka": service{ Endpoints: endpoints{ @@ -3760,6 +3931,18 @@ var awsPartition = partition{ "us-west-2": endpoint{}, }, }, + "lookoutvision": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "machinelearning": service{ Endpoints: endpoints{ @@ -3789,6 +3972,7 @@ var awsPartition = partition{ "macie2": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -3798,6 +3982,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -3825,11 +4010,12 @@ var awsPartition = partition{ Region: "us-west-2", }, }, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, "managedblockchain": service{ @@ -4616,6 +4802,18 @@ var awsPartition = partition{ }, }, }, + "profile": service{ + + Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-southeast-1": endpoint{}, + "ap-southeast-2": endpoint{}, + "eu-central-1": endpoint{}, + "eu-west-2": endpoint{}, + "us-east-1": endpoint{}, + "us-west-2": endpoint{}, + }, + }, "projects.iot1click": service{ Endpoints: endpoints{ @@ -5499,6 +5697,7 @@ var awsPartition = partition{ "servicediscovery": 
service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -5508,6 +5707,7 @@ var awsPartition = partition{ "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-north-1": endpoint{}, + "eu-south-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, @@ -5640,6 +5840,7 @@ var awsPartition = partition{ "snowball": service{ Endpoints: endpoints{ + "af-south-1": endpoint{}, "ap-east-1": endpoint{}, "ap-northeast-1": endpoint{}, "ap-northeast-2": endpoint{}, @@ -6253,11 +6454,14 @@ var awsPartition = partition{ "transcribestreaming": service{ Endpoints: endpoints{ + "ap-northeast-1": endpoint{}, + "ap-northeast-2": endpoint{}, "ap-southeast-2": endpoint{}, "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, "us-west-2": endpoint{}, @@ -6697,12 +6901,36 @@ var awsPartition = partition{ "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, "eu-west-3": endpoint{}, - "me-south-1": endpoint{}, - "sa-east-1": endpoint{}, - "us-east-1": endpoint{}, - "us-east-2": endpoint{}, - "us-west-1": endpoint{}, - "us-west-2": endpoint{}, + "fips-us-east-1": endpoint{ + Hostname: "xray-fips.us-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-1", + }, + }, + "fips-us-east-2": endpoint{ + Hostname: "xray-fips.us-east-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-east-2", + }, + }, + "fips-us-west-1": endpoint{ + Hostname: "xray-fips.us-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-1", + }, + }, + "fips-us-west-2": endpoint{ + Hostname: "xray-fips.us-west-2.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-west-2", + }, + }, + "me-south-1": endpoint{}, + "sa-east-1": endpoint{}, + "us-east-1": endpoint{}, + "us-east-2": endpoint{}, + "us-west-1": endpoint{}, + "us-west-2": endpoint{}, }, }, }, @@ -6951,6 +7179,17 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "docdb": service{ + + Endpoints: endpoints{ + "cn-northwest-1": endpoint{ + Hostname: "rds.cn-northwest-1.amazonaws.com.cn", + CredentialScope: credentialScope{ + Region: "cn-northwest-1", + }, + }, + }, + }, "ds": service{ Endpoints: endpoints{ @@ -7278,12 +7517,6 @@ var awscnPartition = partition{ Region: "cn-northwest-1", }, }, - "fips-aws-cn-global": endpoint{ - Hostname: "organizations.cn-northwest-1.amazonaws.com.cn", - CredentialScope: credentialScope{ - Region: "cn-northwest-1", - }, - }, }, }, "polly": service{ @@ -7292,6 +7525,13 @@ var awscnPartition = partition{ "cn-northwest-1": endpoint{}, }, }, + "ram": service{ + + Endpoints: endpoints{ + "cn-north-1": endpoint{}, + "cn-northwest-1": endpoint{}, + }, + }, "rds": service{ Endpoints: endpoints{ @@ -7977,6 +8217,18 @@ var awsusgovPartition = partition{ "config": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "config.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "config.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -8306,6 +8558,25 @@ var awsusgovPartition = partition{ "us-gov-west-1": endpoint{}, }, }, + "fsx": service{ + + Endpoints: endpoints{ + 
"fips-prod-us-gov-east-1": endpoint{ + Hostname: "fsx-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-prod-us-gov-west-1": endpoint{ + Hostname: "fsx-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, + "us-gov-east-1": endpoint{}, + "us-gov-west-1": endpoint{}, + }, + }, "glacier": service{ Endpoints: endpoints{ @@ -8349,12 +8620,25 @@ var awsusgovPartition = partition{ Protocols: []string{"https"}, }, Endpoints: endpoints{ + "dataplane-us-gov-east-1": endpoint{ + Hostname: "greengrass-ats.iot.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "dataplane-us-gov-west-1": endpoint{ Hostname: "greengrass-ats.iot.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, + "fips-us-gov-east-1": endpoint{ + Hostname: "greengrass-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{ Hostname: "greengrass.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -8370,6 +8654,12 @@ var awsusgovPartition = partition{ }, Endpoints: endpoints{ "us-gov-east-1": endpoint{}, + "us-gov-east-1-fips": endpoint{ + Hostname: "guardduty.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{}, "us-gov-west-1-fips": endpoint{ Hostname: "guardduty.us-gov-west-1.amazonaws.com", @@ -9198,12 +9488,24 @@ var awsusgovPartition = partition{ "waf-regional": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "waf-regional-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "fips-us-gov-west-1": endpoint{ Hostname: "waf-regional-fips.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ Region: "us-gov-west-1", }, }, + "us-gov-east-1": endpoint{ + Hostname: "waf-regional.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, "us-gov-west-1": endpoint{ Hostname: "waf-regional.us-gov-west-1.amazonaws.com", CredentialScope: credentialScope{ @@ -9227,6 +9529,18 @@ var awsusgovPartition = partition{ "xray": service{ Endpoints: endpoints{ + "fips-us-gov-east-1": endpoint{ + Hostname: "xray-fips.us-gov-east-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-east-1", + }, + }, + "fips-us-gov-west-1": endpoint{ + Hostname: "xray-fips.us-gov-west-1.amazonaws.com", + CredentialScope: credentialScope{ + Region: "us-gov-west-1", + }, + }, "us-gov-east-1": endpoint{}, "us-gov-west-1": endpoint{}, }, @@ -9531,6 +9845,12 @@ var awsisoPartition = partition{ }, }, }, + "secretsmanager": service{ + + Endpoints: endpoints{ + "us-iso-east-1": endpoint{}, + }, + }, "snowball": service{ Endpoints: endpoints{ @@ -9692,6 +10012,12 @@ var awsisobPartition = partition{ "us-isob-east-1": endpoint{}, }, }, + "codedeploy": service{ + + Endpoints: endpoints{ + "us-isob-east-1": endpoint{}, + }, + }, "config": service{ Endpoints: endpoints{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go new file mode 100644 index 00000000000..593aedc4218 --- /dev/null +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport.go @@ -0,0 +1,27 @@ +// +build go1.13 + +package session + +import ( + "net" + 
"net/http" + "time" +) + +// Transport that should be used when a custom CA bundle is specified with the +// SDK. +func getCustomTransport() *http.Transport { + return &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + }).DialContext, + ForceAttemptHTTP2: true, + MaxIdleConns: 100, + IdleConnTimeout: 90 * time.Second, + TLSHandshakeTimeout: 10 * time.Second, + ExpectContinueTimeout: 1 * time.Second, + } +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go similarity index 88% rename from vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go rename to vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go index ea9ebb6f6a2..1bf31cf8e56 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.12.go @@ -1,4 +1,4 @@ -// +build go1.7 +// +build !go1.13,go1.7 package session @@ -10,7 +10,7 @@ import ( // Transport that should be used when a custom CA bundle is specified with the // SDK. -func getCABundleTransport() *http.Transport { +func getCustomTransport() *http.Transport { return &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go similarity index 88% rename from vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go rename to vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go index fec39dfc126..253d7bc9d55 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_5.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.5.go @@ -10,7 +10,7 @@ import ( // Transport that should be used when a custom CA bundle is specified with the // SDK. -func getCABundleTransport() *http.Transport { +func getCustomTransport() *http.Transport { return &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go similarity index 90% rename from vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go rename to vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go index 1c5a5391e65..db240605441 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/cabundle_transport_1_6.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/custom_transport_go1.6.go @@ -10,7 +10,7 @@ import ( // Transport that should be used when a custom CA bundle is specified with the // SDK. -func getCABundleTransport() *http.Transport { +func getCustomTransport() *http.Transport { return &http.Transport{ Proxy: http.ProxyFromEnvironment, Dial: (&net.Dialer{ diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go index cc461bd3230..9419b518d58 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/doc.go @@ -208,6 +208,8 @@ env values as well. AWS_SDK_LOAD_CONFIG=1 +Custom Shared Config and Credential Files + Shared credentials file path can be set to instruct the SDK to use an alternative file for the shared credentials. 
If not set, the file will be
loaded from $HOME/.aws/credentials on Linux/Unix based systems, and
@@ -222,6 +224,8 @@ $HOME/.aws/config on Linux/Unix based systems, and
 
 	AWS_CONFIG_FILE=$HOME/my_shared_config
 
+Custom CA Bundle
+
 Path to a custom Certificate Authority (CA) bundle PEM file that the SDK
 will use instead of the default system's root CA bundle. Use this only
 if you want to replace the CA bundle the SDK uses for TLS requests.
@@ -242,6 +246,29 @@ Setting a custom HTTPClient in the aws.Config options will override this setting
 To use this option and custom HTTP client, the HTTP client needs to be provided
 when creating the session. Not the service client.
 
+Custom Client TLS Certificate
+
+The SDK supports the environment and session option being configured with
+Client TLS certificates that are sent as a part of the client's TLS handshake
+for client authentication. If used, both Cert and Key values are required. If
+one is missing, or either fails to load the contents of the file, an error will
+be returned.
+
+The HTTP Client's Transport concrete implementation must be an http.Transport,
+or creating the session will fail.
+
+	AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+
+This can also be configured via the session.Options ClientTLSCert and ClientTLSKey.
+
+	sess, err := session.NewSessionWithOptions(session.Options{
+		ClientTLSCert: myCertFile,
+		ClientTLSKey:  myKeyFile,
+	})
+
+Custom EC2 IMDS Endpoint
+
 The endpoint of the EC2 IMDS client can be configured via the environment
 variable, AWS_EC2_METADATA_SERVICE_ENDPOINT when creating the client with a
 Session. See Options.EC2IMDSEndpoint for more details.
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
index d67c261d74f..3cd5d4b5ae1 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/env_config.go
@@ -101,6 +101,18 @@ type envConfig struct {
 	//  AWS_CA_BUNDLE=$HOME/my_custom_ca_bundle
 	CustomCABundle string
 
+	// Sets the TLS client certificate that should be used by the SDK's HTTP transport
+	// when making requests. The certificate must be paired with a TLS client key file.
+	//
+	//  AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+	ClientTLSCert string
+
+	// Sets the TLS client key that should be used by the SDK's HTTP transport
+	// when making requests. The key must be paired with a TLS client certificate file.
+	//
+	//  AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	ClientTLSKey string
+
 	csmEnabled  string
 	CSMEnabled  *bool
 	CSMPort     string
@@ -219,6 +231,15 @@ var (
 	ec2IMDSEndpointEnvKey = []string{
 		"AWS_EC2_METADATA_SERVICE_ENDPOINT",
 	}
+	useCABundleKey = []string{
+		"AWS_CA_BUNDLE",
+	}
+	useClientTLSCert = []string{
+		"AWS_SDK_GO_CLIENT_TLS_CERT",
+	}
+	useClientTLSKey = []string{
+		"AWS_SDK_GO_CLIENT_TLS_KEY",
+	}
 )
 
 // loadEnvConfig retrieves the SDK's environment configuration.
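To make the new client TLS options above concrete, here is a minimal, self-contained sketch of the session-option form documented in doc.go. The cert/key file paths are hypothetical placeholders; any PEM-encoded client certificate and key pair would do.

```go
package main

import (
	"log"
	"os"

	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	// Hypothetical paths, mirroring the doc.go examples above.
	certFile, err := os.Open(os.Getenv("HOME") + "/my_client_cert")
	if err != nil {
		log.Fatal(err)
	}
	defer certFile.Close()

	keyFile, err := os.Open(os.Getenv("HOME") + "/my_client_key")
	if err != nil {
		log.Fatal(err)
	}
	defer keyFile.Close()

	// Both readers must be supplied together; providing only one fails
	// session creation with the LoadClientTLSCertError code.
	sess, err := session.NewSessionWithOptions(session.Options{
		ClientTLSCert: certFile,
		ClientTLSKey:  keyFile,
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = sess // build service clients from sess as usual
}
```

The same configuration can come from the AWS_SDK_GO_CLIENT_TLS_CERT and AWS_SDK_GO_CLIENT_TLS_KEY environment variables shown above; the session options take priority when both are set.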
@@ -302,7 +323,9 @@ func envConfigLoad(enableSharedConfig bool) (envConfig, error) {
 		cfg.SharedConfigFile = defaults.SharedConfigFilename()
 	}
 
-	cfg.CustomCABundle = os.Getenv("AWS_CA_BUNDLE")
+	setFromEnvVal(&cfg.CustomCABundle, useCABundleKey)
+	setFromEnvVal(&cfg.ClientTLSCert, useClientTLSCert)
+	setFromEnvVal(&cfg.ClientTLSKey, useClientTLSKey)
 
 	var err error
 
 	// STS Regional Endpoint variable
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
index 6430a7f1526..08713cc3474 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go
@@ -25,6 +25,13 @@ const (
 	// ErrCodeSharedConfig represents an error that occurs in the shared
 	// configuration logic
 	ErrCodeSharedConfig = "SharedConfigErr"
+
+	// ErrCodeLoadCustomCABundle is the error code returned when the custom
+	// CA bundle cannot be loaded.
+	ErrCodeLoadCustomCABundle = "LoadCustomCABundleError"
+
+	// ErrCodeLoadClientTLSCert is the error code returned when the client TLS
+	// certificate or key cannot be loaded.
+	ErrCodeLoadClientTLSCert = "LoadClientTLSCertError"
 )
 
 // ErrSharedConfigSourceCollision will be returned if a section contains both
@@ -229,17 +236,46 @@ type Options struct {
 	// the SDK will use instead of the default system's root CA bundle. Use this
 	// only if you want to replace the CA bundle the SDK uses for TLS requests.
 	//
-	// Enabling this option will attempt to merge the Transport into the SDK's HTTP
-	// client. If the client's Transport is not a http.Transport an error will be
-	// returned. If the Transport's TLS config is set this option will cause the SDK
+	// The HTTP Client's Transport concrete implementation must be an http.Transport,
+	// or creating the session will fail.
+	//
+	// If the Transport's TLS config is set this option will cause the SDK
 	// to overwrite the Transport's TLS config's RootCAs value. If the CA
 	// bundle reader contains multiple certificates all of them will be loaded.
 	//
-	// The Session option CustomCABundle is also available when creating sessions
-	// to also enable this feature. CustomCABundle session option field has priority
-	// over the AWS_CA_BUNDLE environment variable, and will be used if both are set.
+	// Can also be specified via the environment variable:
+	//
+	//  AWS_CA_BUNDLE=$HOME/ca_bundle
+	//
+	// Can also be specified via the shared config field:
+	//
+	//  ca_bundle = $HOME/ca_bundle
 	CustomCABundle io.Reader
 
+	// Reader for the TLS client certificate that should be used by the SDK's
+	// HTTP transport when making requests. The certificate must be paired with
+	// a TLS client key; if only one of the two is set, creating the session
+	// returns an error.
+	//
+	// The HTTP Client's Transport concrete implementation must be an http.Transport,
+	// or creating the session will fail.
+	//
+	// Can also be specified via the environment variable:
+	//
+	//  AWS_SDK_GO_CLIENT_TLS_CERT=$HOME/my_client_cert
+	ClientTLSCert io.Reader
+
+	// Reader for the TLS client key that should be used by the SDK's HTTP
+	// transport when making requests. The key must be paired with a TLS client
+	// certificate; if only one of the two is set, creating the session returns
+	// an error.
+	//
+	// The HTTP Client's Transport concrete implementation must be an http.Transport,
+	// or creating the session will fail.
+	//
+	// Can also be specified via the environment variable:
+	//
+	//  AWS_SDK_GO_CLIENT_TLS_KEY=$HOME/my_client_key
+	ClientTLSKey io.Reader
+
 	// The handlers that the session and all API clients will be created with.
	// This must be a complete set of handlers. Use the defaults.Handlers()
 	// function to initialize this value before changing the handlers to be
@@ -319,17 +355,6 @@ func NewSessionWithOptions(opts Options) (*Session, error) {
 		envCfg.EnableSharedConfig = true
 	}
 
-	// Only use AWS_CA_BUNDLE if session option is not provided.
-	if len(envCfg.CustomCABundle) != 0 && opts.CustomCABundle == nil {
-		f, err := os.Open(envCfg.CustomCABundle)
-		if err != nil {
-			return nil, awserr.New("LoadCustomCABundleError",
-				"failed to open custom CA bundle PEM file", err)
-		}
-		defer f.Close()
-		opts.CustomCABundle = f
-	}
-
 	return newSession(opts, envCfg, &opts.Config)
 }
 
@@ -460,6 +485,10 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
 		return nil, err
 	}
 
+	if err := setTLSOptions(&opts, cfg, envCfg, sharedCfg); err != nil {
+		return nil, err
+	}
+
 	s := &Session{
 		Config:   cfg,
 		Handlers: handlers,
@@ -479,13 +508,6 @@ func newSession(opts Options, envCfg envConfig, cfgs ...*aws.Config) (*Session,
 		}
 	}
 
-	// Setup HTTP client with custom cert bundle if enabled
-	if opts.CustomCABundle != nil {
-		if err := loadCustomCABundle(s, opts.CustomCABundle); err != nil {
-			return nil, err
-		}
-	}
-
 	return s, nil
 }
 
@@ -529,22 +551,83 @@ func loadCSMConfig(envCfg envConfig, cfgFiles []string) (csmConfig, error) {
 	return csmConfig{}, nil
 }
 
-func loadCustomCABundle(s *Session, bundle io.Reader) error {
+func setTLSOptions(opts *Options, cfg *aws.Config, envCfg envConfig, sharedCfg sharedConfig) error {
+	// The CA bundle can be specified in both the environment variable and the shared config file.
+	var caBundleFilename = envCfg.CustomCABundle
+	if len(caBundleFilename) == 0 {
+		caBundleFilename = sharedCfg.CustomCABundle
+	}
+
+	// Only use the environment value if the session option is not provided.
+	customTLSOptions := map[string]struct {
+		filename string
+		field    *io.Reader
+		errCode  string
+	}{
+		"custom CA bundle PEM":   {filename: caBundleFilename, field: &opts.CustomCABundle, errCode: ErrCodeLoadCustomCABundle},
+		"custom client TLS cert": {filename: envCfg.ClientTLSCert, field: &opts.ClientTLSCert, errCode: ErrCodeLoadClientTLSCert},
+		"custom client TLS key":  {filename: envCfg.ClientTLSKey, field: &opts.ClientTLSKey, errCode: ErrCodeLoadClientTLSCert},
+	}
+	for name, v := range customTLSOptions {
+		if len(v.filename) != 0 && *v.field == nil {
+			f, err := os.Open(v.filename)
+			if err != nil {
+				return awserr.New(v.errCode, fmt.Sprintf("failed to open %s file", name), err)
+			}
+			defer f.Close()
+			*v.field = f
+		}
+	}
+
+	// Setup HTTP client with custom cert bundle if enabled
+	if opts.CustomCABundle != nil {
+		if err := loadCustomCABundle(cfg.HTTPClient, opts.CustomCABundle); err != nil {
+			return err
+		}
+	}
+
+	// Setup HTTP client TLS certificate and key for client TLS authentication.
+	if opts.ClientTLSCert != nil && opts.ClientTLSKey != nil {
+		if err := loadClientTLSCert(cfg.HTTPClient, opts.ClientTLSCert, opts.ClientTLSKey); err != nil {
+			return err
+		}
+	} else if opts.ClientTLSCert == nil && opts.ClientTLSKey == nil {
+		// Do nothing if neither value is available.
+
+	} else {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			fmt.Sprintf("client TLS cert(%t) and key(%t) must both be provided",
+				opts.ClientTLSCert != nil, opts.ClientTLSKey != nil), nil)
+	}
+
+	return nil
+}
+
+func getHTTPTransport(client *http.Client) (*http.Transport, error) {
 	var t *http.Transport
-	switch v := s.Config.HTTPClient.Transport.(type) {
+	switch v := client.Transport.(type) {
 	case *http.Transport:
 		t = v
 	default:
-		if s.Config.HTTPClient.Transport != nil {
-			return awserr.New("LoadCustomCABundleError",
-				"unable to load custom CA bundle, HTTPClient's transport unsupported type", nil)
+		if client.Transport != nil {
+			return nil, fmt.Errorf("unsupported transport, %T", client.Transport)
 		}
 	}
 	if t == nil {
 		// Nil transport implies `http.DefaultTransport` should be used. Since
 		// the SDK cannot modify, nor copy the `DefaultTransport`, specifying
 		// these values is the next-closest behavior.
-		t = getCABundleTransport()
+		t = getCustomTransport()
+	}
+
+	return t, nil
+}
+
+func loadCustomCABundle(client *http.Client, bundle io.Reader) error {
+	t, err := getHTTPTransport(client)
+	if err != nil {
+		return awserr.New(ErrCodeLoadCustomCABundle,
+			"unable to load custom CA bundle, HTTPClient's transport unsupported type", err)
 	}
 
 	p, err := loadCertPool(bundle)
@@ -556,7 +639,7 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error {
 	}
 	t.TLSClientConfig.RootCAs = p
 
-	s.Config.HTTPClient.Transport = t
+	client.Transport = t
 
 	return nil
 }
@@ -564,19 +647,57 @@ func loadCustomCABundle(s *Session, bundle io.Reader) error {
 func loadCertPool(r io.Reader) (*x509.CertPool, error) {
 	b, err := ioutil.ReadAll(r)
 	if err != nil {
-		return nil, awserr.New("LoadCustomCABundleError",
+		return nil, awserr.New(ErrCodeLoadCustomCABundle,
 			"failed to read custom CA bundle PEM file", err)
 	}
 
 	p := x509.NewCertPool()
 	if !p.AppendCertsFromPEM(b) {
-		return nil, awserr.New("LoadCustomCABundleError",
+		return nil, awserr.New(ErrCodeLoadCustomCABundle,
 			"failed to load custom CA bundle PEM file", err)
 	}
 
 	return p, nil
 }
 
+func loadClientTLSCert(client *http.Client, certFile, keyFile io.Reader) error {
+	t, err := getHTTPTransport(client)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to get usable HTTP transport from client", err)
+	}
+
+	cert, err := ioutil.ReadAll(certFile)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to read client TLS cert file", err)
+	}
+
+	key, err := ioutil.ReadAll(keyFile)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to read client TLS key file", err)
+	}
+
+	clientCert, err := tls.X509KeyPair(cert, key)
+	if err != nil {
+		return awserr.New(ErrCodeLoadClientTLSCert,
+			"unable to load x509 key pair from client cert", err)
+	}
+
+	tlsCfg := t.TLSClientConfig
+	if tlsCfg == nil {
+		tlsCfg = &tls.Config{}
+	}
+
+	tlsCfg.Certificates = append(tlsCfg.Certificates, clientCert)
+
+	t.TLSClientConfig = tlsCfg
+	client.Transport = t
+
+	return nil
+}
+
 func mergeConfigSrcs(cfg, userCfg *aws.Config,
 	envCfg envConfig, sharedCfg sharedConfig,
 	handlers request.Handlers,
diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
index 680805a38ad..be7daacf308 100644
--- a/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
+++ b/vendor/github.com/aws/aws-sdk-go/aws/session/shared_config.go
@@ -34,6 +34,9 @@ const (
 	// Additional Config fields
 	regionKey = `region`
 
+	// custom CA Bundle filename
+	customCABundleKey = 
`ca_bundle` + // endpoint discovery group enableEndpointDiscoveryKey = `endpoint_discovery_enabled` // optional @@ -90,6 +93,15 @@ type sharedConfig struct { // region Region string + // CustomCABundle is the file path to a PEM file the SDK will read and + // use to configure the HTTP transport with additional CA certs that are + // not present in the platforms default CA store. + // + // This value will be ignored if the file does not exist. + // + // ca_bundle + CustomCABundle string + // EnableEndpointDiscovery can be enabled in the shared config by setting // endpoint_discovery_enabled to true // @@ -276,6 +288,7 @@ func (cfg *sharedConfig) setFromIniFile(profile string, file sharedConfigFile, e updateString(&cfg.SourceProfileName, section, sourceProfileKey) updateString(&cfg.CredentialSource, section, credentialSourceKey) updateString(&cfg.Region, section, regionKey) + updateString(&cfg.CustomCABundle, section, customCABundleKey) if section.Has(roleDurationSecondsKey) { d := time.Duration(section.Int(roleDurationSecondsKey)) * time.Second diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index a849fbc75b4..919084d52f9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.35.31" +const SDKVersion = "1.36.15" diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go index f988e1fbdb3..d536b0954c0 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/api.go @@ -16,6 +16,91 @@ import ( "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" ) +const opBatchExecuteStatement = "BatchExecuteStatement" + +// BatchExecuteStatementRequest generates a "aws/request.Request" representing the +// client's request for the BatchExecuteStatement operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See BatchExecuteStatement for more information on using the BatchExecuteStatement +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the BatchExecuteStatementRequest method. +// req, resp := client.BatchExecuteStatementRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchExecuteStatement +func (c *DynamoDB) BatchExecuteStatementRequest(input *BatchExecuteStatementInput) (req *request.Request, output *BatchExecuteStatementOutput) { + op := &request.Operation{ + Name: opBatchExecuteStatement, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &BatchExecuteStatementInput{} + } + + output = &BatchExecuteStatementOutput{} + req = c.newRequest(op, input, output) + return +} + +// BatchExecuteStatement API operation for Amazon DynamoDB. 
+// +// This operation allows you to perform batch reads and writes on data stored +// in DynamoDB, using PartiQL. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation BatchExecuteStatement for usage and error information. +// +// Returned Error Types: +// * RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/BatchExecuteStatement +func (c *DynamoDB) BatchExecuteStatement(input *BatchExecuteStatementInput) (*BatchExecuteStatementOutput, error) { + req, out := c.BatchExecuteStatementRequest(input) + return out, req.Send() +} + +// BatchExecuteStatementWithContext is the same as BatchExecuteStatement with the addition of +// the ability to pass a context and additional request options. +// +// See BatchExecuteStatement for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) BatchExecuteStatementWithContext(ctx aws.Context, input *BatchExecuteStatementInput, opts ...request.Option) (*BatchExecuteStatementOutput, error) { + req, out := c.BatchExecuteStatementRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + const opBatchGetItem = "BatchGetItem" // BatchGetItemRequest generates a "aws/request.Request" representing the @@ -2094,6 +2179,114 @@ func (c *DynamoDB) DescribeGlobalTableSettingsWithContext(ctx aws.Context, input return out, req.Send() } +const opDescribeKinesisStreamingDestination = "DescribeKinesisStreamingDestination" + +// DescribeKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the +// client's request for the DescribeKinesisStreamingDestination operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See DescribeKinesisStreamingDestination for more information on using the DescribeKinesisStreamingDestination +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the DescribeKinesisStreamingDestinationRequest method. 
+// req, resp := client.DescribeKinesisStreamingDestinationRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeKinesisStreamingDestination +func (c *DynamoDB) DescribeKinesisStreamingDestinationRequest(input *DescribeKinesisStreamingDestinationInput) (req *request.Request, output *DescribeKinesisStreamingDestinationOutput) { + op := &request.Operation{ + Name: opDescribeKinesisStreamingDestination, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeKinesisStreamingDestinationInput{} + } + + output = &DescribeKinesisStreamingDestinationOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// DescribeKinesisStreamingDestination API operation for Amazon DynamoDB. +// +// Returns information about the status of Kinesis streaming. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation DescribeKinesisStreamingDestination for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DescribeKinesisStreamingDestination +func (c *DynamoDB) DescribeKinesisStreamingDestination(input *DescribeKinesisStreamingDestinationInput) (*DescribeKinesisStreamingDestinationOutput, error) { + req, out := c.DescribeKinesisStreamingDestinationRequest(input) + return out, req.Send() +} + +// DescribeKinesisStreamingDestinationWithContext is the same as DescribeKinesisStreamingDestination with the addition of +// the ability to pass a context and additional request options. +// +// See DescribeKinesisStreamingDestination for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) DescribeKinesisStreamingDestinationWithContext(ctx aws.Context, input *DescribeKinesisStreamingDestinationInput, opts ...request.Option) (*DescribeKinesisStreamingDestinationOutput, error) { + req, out := c.DescribeKinesisStreamingDestinationRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) 
+ return out, req.Send() +} + const opDescribeLimits = "DescribeLimits" // DescribeLimitsRequest generates a "aws/request.Request" representing the @@ -2565,68 +2758,88 @@ func (c *DynamoDB) DescribeTimeToLiveWithContext(ctx aws.Context, input *Describ return out, req.Send() } -const opExportTableToPointInTime = "ExportTableToPointInTime" +const opDisableKinesisStreamingDestination = "DisableKinesisStreamingDestination" -// ExportTableToPointInTimeRequest generates a "aws/request.Request" representing the -// client's request for the ExportTableToPointInTime operation. The "output" return +// DisableKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the +// client's request for the DisableKinesisStreamingDestination operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ExportTableToPointInTime for more information on using the ExportTableToPointInTime +// See DisableKinesisStreamingDestination for more information on using the DisableKinesisStreamingDestination // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ExportTableToPointInTimeRequest method. -// req, resp := client.ExportTableToPointInTimeRequest(params) +// // Example sending a request using the DisableKinesisStreamingDestinationRequest method. +// req, resp := client.DisableKinesisStreamingDestinationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime -func (c *DynamoDB) ExportTableToPointInTimeRequest(input *ExportTableToPointInTimeInput) (req *request.Request, output *ExportTableToPointInTimeOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DisableKinesisStreamingDestination +func (c *DynamoDB) DisableKinesisStreamingDestinationRequest(input *DisableKinesisStreamingDestinationInput) (req *request.Request, output *DisableKinesisStreamingDestinationOutput) { op := &request.Operation{ - Name: opExportTableToPointInTime, + Name: opDisableKinesisStreamingDestination, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ExportTableToPointInTimeInput{} + input = &DisableKinesisStreamingDestinationInput{} } - output = &ExportTableToPointInTimeOutput{} + output = &DisableKinesisStreamingDestinationOutput{} req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } return } -// ExportTableToPointInTime API operation for Amazon DynamoDB. 
+// DisableKinesisStreamingDestination API operation for Amazon DynamoDB. // -// Exports table data to an S3 bucket. The table must have point in time recovery -// enabled, and you can export data from any time within the point in time recovery -// window. +// Stops replication from the DynamoDB table to the Kinesis data stream. This +// is done without deleting either of the resources. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation ExportTableToPointInTime for usage and error information. +// API operation DisableKinesisStreamingDestination for usage and error information. // // Returned Error Types: -// * TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account. -// -// * PointInTimeRecoveryUnavailableException -// Point in time recovery has not yet been enabled for this source table. +// * InternalServerError +// An error occurred on the server side. // // * LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. @@ -2642,75 +2855,75 @@ func (c *DynamoDB) ExportTableToPointInTimeRequest(input *ExportTableToPointInTi // // There is a soft account quota of 256 tables. // -// * InvalidExportTimeException -// The specified ExportTime is outside of the point in time recovery window. -// -// * ExportConflictException -// There was a conflict when writing to the specified S3 bucket. +// * ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. // -// * InternalServerError -// An error occurred on the server side. +// * ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime -func (c *DynamoDB) ExportTableToPointInTime(input *ExportTableToPointInTimeInput) (*ExportTableToPointInTimeOutput, error) { - req, out := c.ExportTableToPointInTimeRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/DisableKinesisStreamingDestination +func (c *DynamoDB) DisableKinesisStreamingDestination(input *DisableKinesisStreamingDestinationInput) (*DisableKinesisStreamingDestinationOutput, error) { + req, out := c.DisableKinesisStreamingDestinationRequest(input) return out, req.Send() } -// ExportTableToPointInTimeWithContext is the same as ExportTableToPointInTime with the addition of +// DisableKinesisStreamingDestinationWithContext is the same as DisableKinesisStreamingDestination with the addition of // the ability to pass a context and additional request options. // -// See ExportTableToPointInTime for details on how to use this API operation. +// See DisableKinesisStreamingDestination for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *DynamoDB) ExportTableToPointInTimeWithContext(ctx aws.Context, input *ExportTableToPointInTimeInput, opts ...request.Option) (*ExportTableToPointInTimeOutput, error) { - req, out := c.ExportTableToPointInTimeRequest(input) +func (c *DynamoDB) DisableKinesisStreamingDestinationWithContext(ctx aws.Context, input *DisableKinesisStreamingDestinationInput, opts ...request.Option) (*DisableKinesisStreamingDestinationOutput, error) { + req, out := c.DisableKinesisStreamingDestinationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opGetItem = "GetItem" +const opEnableKinesisStreamingDestination = "EnableKinesisStreamingDestination" -// GetItemRequest generates a "aws/request.Request" representing the -// client's request for the GetItem operation. The "output" return +// EnableKinesisStreamingDestinationRequest generates a "aws/request.Request" representing the +// client's request for the EnableKinesisStreamingDestination operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See GetItem for more information on using the GetItem +// See EnableKinesisStreamingDestination for more information on using the EnableKinesisStreamingDestination // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the GetItemRequest method. -// req, resp := client.GetItemRequest(params) +// // Example sending a request using the EnableKinesisStreamingDestinationRequest method. +// req, resp := client.EnableKinesisStreamingDestinationRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem -func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, output *GetItemOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/EnableKinesisStreamingDestination +func (c *DynamoDB) EnableKinesisStreamingDestinationRequest(input *EnableKinesisStreamingDestinationInput) (req *request.Request, output *EnableKinesisStreamingDestinationOutput) { op := &request.Operation{ - Name: opGetItem, + Name: opEnableKinesisStreamingDestination, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &GetItemInput{} + input = &EnableKinesisStreamingDestinationInput{} } - output = &GetItemOutput{} + output = &EnableKinesisStreamingDestinationOutput{} req = c.newRequest(op, input, output) // if custom endpoint for the request is set to a non empty string, // we skip the endpoint discovery workflow. @@ -2740,381 +2953,434 @@ func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, ou return } -// GetItem API operation for Amazon DynamoDB. +// EnableKinesisStreamingDestination API operation for Amazon DynamoDB. // -// The GetItem operation returns a set of attributes for the item with the given -// primary key. If there is no matching item, GetItem does not return any data -// and there will be no Item element in the response. -// -// GetItem provides an eventually consistent read by default. 
If your application
-// requires a strongly consistent read, set ConsistentRead to true. Although
-// a strongly consistent read might take more time than an eventually consistent
-// read, it always returns the last updated value.
+// Starts table data replication to the specified Kinesis data stream at a timestamp
+// chosen during the enable workflow. If this operation doesn't return results
+// immediately, use DescribeKinesisStreamingDestination to check if streaming
+// to the Kinesis data stream is ACTIVE.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon DynamoDB's
-// API operation GetItem for usage and error information.
+// API operation EnableKinesisStreamingDestination for usage and error information.
//
// Returned Error Types:
-// * ProvisionedThroughputExceededException
-// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
-// requests that receive this exception. Your request is eventually successful,
-// unless your retry queue is too large to finish. Reduce the frequency of requests
-// and use exponential backoff. For more information, go to Error Retries and
-// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
+// * InternalServerError
+// An error occurred on the server side.
//
-// * ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
+// * LimitExceededException
+// There is no limit to the number of daily on-demand backups that can be taken.
//
-// * RequestLimitExceeded
-// Throughput exceeds the current throughput quota for your account. Please
-// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request
-// a quota increase.
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
//
-// * InternalServerError
-// An error occurred on the server side.
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem
-func (c *DynamoDB) GetItem(input *GetItemInput) (*GetItemOutput, error) {
-	req, out := c.GetItemRequest(input)
+// There is a soft account quota of 256 tables.
+//
+// * ResourceInUseException
+// The operation conflicts with the resource's availability. For example, you
+// attempted to recreate an existing table, or tried to delete a table currently
+// in the CREATING state.
+//
+// * ResourceNotFoundException
+// The operation tried to access a nonexistent table or index. The resource
+// might not be specified correctly, or its status might not be ACTIVE.
+// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/EnableKinesisStreamingDestination +func (c *DynamoDB) EnableKinesisStreamingDestination(input *EnableKinesisStreamingDestinationInput) (*EnableKinesisStreamingDestinationOutput, error) { + req, out := c.EnableKinesisStreamingDestinationRequest(input) return out, req.Send() } -// GetItemWithContext is the same as GetItem with the addition of +// EnableKinesisStreamingDestinationWithContext is the same as EnableKinesisStreamingDestination with the addition of // the ability to pass a context and additional request options. // -// See GetItem for details on how to use this API operation. +// See EnableKinesisStreamingDestination for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) GetItemWithContext(ctx aws.Context, input *GetItemInput, opts ...request.Option) (*GetItemOutput, error) { - req, out := c.GetItemRequest(input) +func (c *DynamoDB) EnableKinesisStreamingDestinationWithContext(ctx aws.Context, input *EnableKinesisStreamingDestinationInput, opts ...request.Option) (*EnableKinesisStreamingDestinationOutput, error) { + req, out := c.EnableKinesisStreamingDestinationRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListBackups = "ListBackups" +const opExecuteStatement = "ExecuteStatement" -// ListBackupsRequest generates a "aws/request.Request" representing the -// client's request for the ListBackups operation. The "output" return +// ExecuteStatementRequest generates a "aws/request.Request" representing the +// client's request for the ExecuteStatement operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListBackups for more information on using the ListBackups +// See ExecuteStatement for more information on using the ExecuteStatement // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListBackupsRequest method. -// req, resp := client.ListBackupsRequest(params) +// // Example sending a request using the ExecuteStatementRequest method. 
+// req, resp := client.ExecuteStatementRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups -func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Request, output *ListBackupsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteStatement +func (c *DynamoDB) ExecuteStatementRequest(input *ExecuteStatementInput) (req *request.Request, output *ExecuteStatementOutput) { op := &request.Operation{ - Name: opListBackups, + Name: opExecuteStatement, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListBackupsInput{} + input = &ExecuteStatementInput{} } - output = &ListBackupsOutput{} + output = &ExecuteStatementOutput{} req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } return } -// ListBackups API operation for Amazon DynamoDB. -// -// List backups associated with an AWS account. To list backups for a given -// table, specify TableName. ListBackups returns a paginated list of results -// with at most 1 MB worth of items in a page. You can also specify a maximum -// number of entries to be returned in a page. +// ExecuteStatement API operation for Amazon DynamoDB. // -// In the request, start time is inclusive, but end time is exclusive. Note -// that these boundaries are for the time at which the original backup was requested. -// -// You can call ListBackups a maximum of five times per second. +// This operation allows you to perform reads and singleton writes on data stored +// in DynamoDB, using PartiQL. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation ListBackups for usage and error information. +// API operation ExecuteStatement for usage and error information. // // Returned Error Types: +// * ConditionalCheckFailedException +// A condition specified in the operation could not be evaluated. +// +// * ProvisionedThroughputExceededException +// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry +// requests that receive this exception. Your request is eventually successful, +// unless your retry queue is too large to finish. Reduce the frequency of requests +// and use exponential backoff. For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// * ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. 
+// +// * ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// * TransactionConflictException +// Operation was rejected because there is an ongoing transaction for the item. +// +// * RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a quota increase. +// // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups -func (c *DynamoDB) ListBackups(input *ListBackupsInput) (*ListBackupsOutput, error) { - req, out := c.ListBackupsRequest(input) +// * DuplicateItemException +// There was an attempt to insert an item with the same primary key as an item +// that already exists in the DynamoDB table. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteStatement +func (c *DynamoDB) ExecuteStatement(input *ExecuteStatementInput) (*ExecuteStatementOutput, error) { + req, out := c.ExecuteStatementRequest(input) return out, req.Send() } -// ListBackupsWithContext is the same as ListBackups with the addition of +// ExecuteStatementWithContext is the same as ExecuteStatement with the addition of // the ability to pass a context and additional request options. // -// See ListBackups for details on how to use this API operation. +// See ExecuteStatement for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) ListBackupsWithContext(ctx aws.Context, input *ListBackupsInput, opts ...request.Option) (*ListBackupsOutput, error) { - req, out := c.ListBackupsRequest(input) +func (c *DynamoDB) ExecuteStatementWithContext(ctx aws.Context, input *ExecuteStatementInput, opts ...request.Option) (*ExecuteStatementOutput, error) { + req, out := c.ExecuteStatementRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListContributorInsights = "ListContributorInsights" +const opExecuteTransaction = "ExecuteTransaction" -// ListContributorInsightsRequest generates a "aws/request.Request" representing the -// client's request for the ListContributorInsights operation. The "output" return +// ExecuteTransactionRequest generates a "aws/request.Request" representing the +// client's request for the ExecuteTransaction operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListContributorInsights for more information on using the ListContributorInsights +// See ExecuteTransaction for more information on using the ExecuteTransaction // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListContributorInsightsRequest method. 
-// req, resp := client.ListContributorInsightsRequest(params) +// // Example sending a request using the ExecuteTransactionRequest method. +// req, resp := client.ExecuteTransactionRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListContributorInsights -func (c *DynamoDB) ListContributorInsightsRequest(input *ListContributorInsightsInput) (req *request.Request, output *ListContributorInsightsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteTransaction +func (c *DynamoDB) ExecuteTransactionRequest(input *ExecuteTransactionInput) (req *request.Request, output *ExecuteTransactionOutput) { op := &request.Operation{ - Name: opListContributorInsights, + Name: opExecuteTransaction, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &ListContributorInsightsInput{} + input = &ExecuteTransactionInput{} } - output = &ListContributorInsightsOutput{} + output = &ExecuteTransactionOutput{} req = c.newRequest(op, input, output) return } -// ListContributorInsights API operation for Amazon DynamoDB. +// ExecuteTransaction API operation for Amazon DynamoDB. // -// Returns a list of ContributorInsightsSummary for a table and all its global -// secondary indexes. +// This operation allows you to perform transactional reads or writes on data +// stored in DynamoDB, using PartiQL. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation ListContributorInsights for usage and error information. +// API operation ExecuteTransaction for usage and error information. // // Returned Error Types: // * ResourceNotFoundException // The operation tried to access a nonexistent table or index. The resource // might not be specified correctly, or its status might not be ACTIVE. // +// * TransactionCanceledException +// The entire transaction request was canceled. +// +// DynamoDB cancels a TransactWriteItems request under the following circumstances: +// +// * A condition in one of the condition expressions is not met. +// +// * A table in the TransactWriteItems request is in a different account +// or region. +// +// * More than one action in the TransactWriteItems operation targets the +// same item. +// +// * There is insufficient provisioned capacity for the transaction to be +// completed. +// +// * An item size becomes too large (larger than 400 KB), or a local secondary +// index (LSI) becomes too large, or a similar validation error occurs because +// of changes made by the transaction. +// +// * There is a user error, such as an invalid data format. +// +// DynamoDB cancels a TransactGetItems request under the following circumstances: +// +// * There is an ongoing TransactGetItems operation that conflicts with a +// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. +// In this case the TransactGetItems operation fails with a TransactionCanceledException. +// +// * A table in the TransactGetItems request is in a different account or +// region. +// +// * There is insufficient provisioned capacity for the transaction to be +// completed. 
+//
+// * There is a user error, such as an invalid data format.
+//
+// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
+// property. This property is not set for other languages. Transaction cancellation
+// reasons are ordered in the order of the requested items; if an item has no
+// error, it will have the NONE code and a null message.
+//
+// Cancellation reason codes and possible error messages:
+//
+// * No Errors: Code: NONE Message: null
+//
+// * Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+// conditional request failed.
+//
+// * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+// Message: Collection size exceeded.
+//
+// * Transaction Conflict: Code: TransactionConflict Message: Transaction
+// is ongoing for the item.
+//
+// * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+// Messages: The level of configured provisioned throughput for the table
+// was exceeded. Consider increasing your provisioning level with the UpdateTable
+// API. This message is returned when provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. This message is returned
+// when provisioned throughput is exceeded on a provisioned GSI.
+//
+// * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table
+// as DynamoDB is automatically scaling the table. Throughput exceeds the
+// current capacity for one or more global secondary indexes. DynamoDB is
+// automatically scaling your index so please try again shortly. This message
+// is returned when writes get throttled on an On-Demand GSI as DynamoDB
+// is automatically scaling the GSI.
+//
+// * Validation Error: Code: ValidationError Messages: One or more parameter
+// values were invalid. The update expression attempted to update the secondary
+// index key beyond allowed size limits. The update expression attempted
+// to update the secondary index key to an unsupported type. An operand in the
+// update expression has an incorrect data type. Item size to update has
+// exceeded the maximum allowed size. Number overflow. Attempting to store
+// a number with a magnitude larger than the supported range. Type mismatch
+// for the attribute to update. Nesting levels have exceeded supported limits.
+// The document path provided in the update expression is invalid for update.
+// The provided expression refers to an attribute that does not exist in
+// the item.
+//
+// * TransactionInProgressException
+// The transaction with the given request token is already in progress.
+//
+// * IdempotentParameterMismatchException
+// DynamoDB rejected the request because you retried a request with a different
+// payload but with an idempotent token that was already used.
+//
+// * ProvisionedThroughputExceededException
+// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+// requests that receive this exception.
Your request is eventually successful, +// unless your retry queue is too large to finish. Reduce the frequency of requests +// and use exponential backoff. For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// * RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a quota increase. +// // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListContributorInsights -func (c *DynamoDB) ListContributorInsights(input *ListContributorInsightsInput) (*ListContributorInsightsOutput, error) { - req, out := c.ListContributorInsightsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExecuteTransaction +func (c *DynamoDB) ExecuteTransaction(input *ExecuteTransactionInput) (*ExecuteTransactionOutput, error) { + req, out := c.ExecuteTransactionRequest(input) return out, req.Send() } -// ListContributorInsightsWithContext is the same as ListContributorInsights with the addition of +// ExecuteTransactionWithContext is the same as ExecuteTransaction with the addition of // the ability to pass a context and additional request options. // -// See ListContributorInsights for details on how to use this API operation. +// See ExecuteTransaction for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) ListContributorInsightsWithContext(ctx aws.Context, input *ListContributorInsightsInput, opts ...request.Option) (*ListContributorInsightsOutput, error) { - req, out := c.ListContributorInsightsRequest(input) +func (c *DynamoDB) ExecuteTransactionWithContext(ctx aws.Context, input *ExecuteTransactionInput, opts ...request.Option) (*ExecuteTransactionOutput, error) { + req, out := c.ExecuteTransactionRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListContributorInsightsPages iterates over the pages of a ListContributorInsights operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListContributorInsights method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListContributorInsights operation. -// pageNum := 0 -// err := client.ListContributorInsightsPages(params, -// func(page *dynamodb.ListContributorInsightsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *DynamoDB) ListContributorInsightsPages(input *ListContributorInsightsInput, fn func(*ListContributorInsightsOutput, bool) bool) error { - return c.ListContributorInsightsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListContributorInsightsPagesWithContext same as ListContributorInsightsPages except -// it takes a Context and allows setting request options on the pages. 
-// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ListContributorInsightsPagesWithContext(ctx aws.Context, input *ListContributorInsightsInput, fn func(*ListContributorInsightsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListContributorInsightsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListContributorInsightsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListContributorInsightsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListExports = "ListExports" - -// ListExportsRequest generates a "aws/request.Request" representing the -// client's request for the ListExports operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. +const opExportTableToPointInTime = "ExportTableToPointInTime" + +// ExportTableToPointInTimeRequest generates a "aws/request.Request" representing the +// client's request for the ExportTableToPointInTime operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListExports for more information on using the ListExports +// See ExportTableToPointInTime for more information on using the ExportTableToPointInTime // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListExportsRequest method. -// req, resp := client.ListExportsRequest(params) +// // Example sending a request using the ExportTableToPointInTimeRequest method. +// req, resp := client.ExportTableToPointInTimeRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports -func (c *DynamoDB) ListExportsRequest(input *ListExportsInput) (req *request.Request, output *ListExportsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime +func (c *DynamoDB) ExportTableToPointInTimeRequest(input *ExportTableToPointInTimeInput) (req *request.Request, output *ExportTableToPointInTimeOutput) { op := &request.Operation{ - Name: opListExports, + Name: opExportTableToPointInTime, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"NextToken"}, - OutputTokens: []string{"NextToken"}, - LimitToken: "MaxResults", - TruncationToken: "", - }, } if input == nil { - input = &ListExportsInput{} + input = &ExportTableToPointInTimeInput{} } - output = &ListExportsOutput{} + output = &ExportTableToPointInTimeOutput{} req = c.newRequest(op, input, output) return } -// ListExports API operation for Amazon DynamoDB. +// ExportTableToPointInTime API operation for Amazon DynamoDB. // -// Lists completed exports within the past 90 days. 
+// Exports table data to an S3 bucket. The table must have point in time recovery +// enabled, and you can export data from any time within the point in time recovery +// window. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation ListExports for usage and error information. +// API operation ExportTableToPointInTime for usage and error information. // // Returned Error Types: +// * TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account. +// +// * PointInTimeRecoveryUnavailableException +// Point in time recovery has not yet been enabled for this source table. +// // * LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // @@ -3129,121 +3395,75 @@ func (c *DynamoDB) ListExportsRequest(input *ListExportsInput) (req *request.Req // // There is a soft account quota of 256 tables. // +// * InvalidExportTimeException +// The specified ExportTime is outside of the point in time recovery window. +// +// * ExportConflictException +// There was a conflict when writing to the specified S3 bucket. +// // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports -func (c *DynamoDB) ListExports(input *ListExportsInput) (*ListExportsOutput, error) { - req, out := c.ListExportsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ExportTableToPointInTime +func (c *DynamoDB) ExportTableToPointInTime(input *ExportTableToPointInTimeInput) (*ExportTableToPointInTimeOutput, error) { + req, out := c.ExportTableToPointInTimeRequest(input) return out, req.Send() } -// ListExportsWithContext is the same as ListExports with the addition of +// ExportTableToPointInTimeWithContext is the same as ExportTableToPointInTime with the addition of // the ability to pass a context and additional request options. // -// See ListExports for details on how to use this API operation. +// See ExportTableToPointInTime for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) ListExportsWithContext(ctx aws.Context, input *ListExportsInput, opts ...request.Option) (*ListExportsOutput, error) { - req, out := c.ListExportsRequest(input) +func (c *DynamoDB) ExportTableToPointInTimeWithContext(ctx aws.Context, input *ExportTableToPointInTimeInput, opts ...request.Option) (*ExportTableToPointInTimeOutput, error) { + req, out := c.ExportTableToPointInTimeRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListExportsPages iterates over the pages of a ListExports operation, -// calling the "fn" function with the response data for each page. To stop -// iterating, return false from the fn function. -// -// See ListExports method for more information on how to use this operation. -// -// Note: This operation can generate multiple requests to a service. -// -// // Example iterating over at most 3 pages of a ListExports operation. 
-// pageNum := 0 -// err := client.ListExportsPages(params, -// func(page *dynamodb.ListExportsOutput, lastPage bool) bool { -// pageNum++ -// fmt.Println(page) -// return pageNum <= 3 -// }) -// -func (c *DynamoDB) ListExportsPages(input *ListExportsInput, fn func(*ListExportsOutput, bool) bool) error { - return c.ListExportsPagesWithContext(aws.BackgroundContext(), input, fn) -} - -// ListExportsPagesWithContext same as ListExportsPages except -// it takes a Context and allows setting request options on the pages. -// -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. -func (c *DynamoDB) ListExportsPagesWithContext(ctx aws.Context, input *ListExportsInput, fn func(*ListExportsOutput, bool) bool, opts ...request.Option) error { - p := request.Pagination{ - NewRequest: func() (*request.Request, error) { - var inCpy *ListExportsInput - if input != nil { - tmp := *input - inCpy = &tmp - } - req, _ := c.ListExportsRequest(inCpy) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return req, nil - }, - } - - for p.Next() { - if !fn(p.Page().(*ListExportsOutput), !p.HasNextPage()) { - break - } - } - - return p.Err() -} - -const opListGlobalTables = "ListGlobalTables" +const opGetItem = "GetItem" -// ListGlobalTablesRequest generates a "aws/request.Request" representing the -// client's request for the ListGlobalTables operation. The "output" return +// GetItemRequest generates a "aws/request.Request" representing the +// client's request for the GetItem operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListGlobalTables for more information on using the ListGlobalTables +// See GetItem for more information on using the GetItem // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListGlobalTablesRequest method. -// req, resp := client.ListGlobalTablesRequest(params) +// // Example sending a request using the GetItemRequest method. +// req, resp := client.GetItemRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables -func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *request.Request, output *ListGlobalTablesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem +func (c *DynamoDB) GetItemRequest(input *GetItemInput) (req *request.Request, output *GetItemOutput) { op := &request.Operation{ - Name: opListGlobalTables, + Name: opGetItem, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &ListGlobalTablesInput{} + input = &GetItemInput{} } - output = &ListGlobalTablesOutput{} + output = &GetItemOutput{} req = c.newRequest(op, input, output) // if custom endpoint for the request is set to a non empty string, // we skip the endpoint discovery workflow. 
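The endpoint discovery block above (and its twins on the other Request
constructors) only attaches the crr.endpointdiscovery handler when discovery
is enabled on the client config and no custom endpoint is set. A minimal
sketch of opting in from application code, assuming standard aws-sdk-go
session wiring (the region is a placeholder):

	sess := session.Must(session.NewSession(&aws.Config{
		Region: aws.String("us-east-1"),
		// Opt in to endpoint discovery; it is skipped when Endpoint is set.
		EnableEndpointDiscovery: aws.Bool(true),
	}))
	svc := dynamodb.New(sess) // subsequent requests may consult c.endpointCache
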
@@ -3273,90 +3493,105 @@ func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *r return } -// ListGlobalTables API operation for Amazon DynamoDB. +// GetItem API operation for Amazon DynamoDB. // -// Lists all global tables that have a replica in the specified Region. +// The GetItem operation returns a set of attributes for the item with the given +// primary key. If there is no matching item, GetItem does not return any data +// and there will be no Item element in the response. // -// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) -// of global tables. +// GetItem provides an eventually consistent read by default. If your application +// requires a strongly consistent read, set ConsistentRead to true. Although +// a strongly consistent read might take more time than an eventually consistent +// read, it always returns the last updated value. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation ListGlobalTables for usage and error information. +// API operation GetItem for usage and error information. // // Returned Error Types: +// * ProvisionedThroughputExceededException +// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry +// requests that receive this exception. Your request is eventually successful, +// unless your retry queue is too large to finish. Reduce the frequency of requests +// and use exponential backoff. For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// * ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a quota increase. +// // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables -func (c *DynamoDB) ListGlobalTables(input *ListGlobalTablesInput) (*ListGlobalTablesOutput, error) { - req, out := c.ListGlobalTablesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/GetItem +func (c *DynamoDB) GetItem(input *GetItemInput) (*GetItemOutput, error) { + req, out := c.GetItemRequest(input) return out, req.Send() } -// ListGlobalTablesWithContext is the same as ListGlobalTables with the addition of +// GetItemWithContext is the same as GetItem with the addition of // the ability to pass a context and additional request options. // -// See ListGlobalTables for details on how to use this API operation. +// See GetItem for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
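//
// A minimal sketch of the strongly consistent read described above, assuming
// a ctx and svc already exist; the table, key, and attribute names are
// placeholders:
//
//    out, err := svc.GetItemWithContext(ctx, &dynamodb.GetItemInput{
//        TableName:      aws.String("Music"),
//        ConsistentRead: aws.Bool(true),
//        Key: map[string]*dynamodb.AttributeValue{
//            "Artist":    {S: aws.String("No One You Know")},
//            "SongTitle": {S: aws.String("Call Me Today")},
//        },
//    })
//    if err == nil && out.Item == nil {
//        // no matching item: the response carries no Item element
//    }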
-func (c *DynamoDB) ListGlobalTablesWithContext(ctx aws.Context, input *ListGlobalTablesInput, opts ...request.Option) (*ListGlobalTablesOutput, error) { - req, out := c.ListGlobalTablesRequest(input) +func (c *DynamoDB) GetItemWithContext(ctx aws.Context, input *GetItemInput, opts ...request.Option) (*GetItemOutput, error) { + req, out := c.GetItemRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opListTables = "ListTables" +const opListBackups = "ListBackups" -// ListTablesRequest generates a "aws/request.Request" representing the -// client's request for the ListTables operation. The "output" return +// ListBackupsRequest generates a "aws/request.Request" representing the +// client's request for the ListBackups operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTables for more information on using the ListTables +// See ListBackups for more information on using the ListBackups // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTablesRequest method. -// req, resp := client.ListTablesRequest(params) +// // Example sending a request using the ListBackupsRequest method. +// req, resp := client.ListBackupsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables -func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups +func (c *DynamoDB) ListBackupsRequest(input *ListBackupsInput) (req *request.Request, output *ListBackupsOutput) { op := &request.Operation{ - Name: opListTables, + Name: opListBackups, HTTPMethod: "POST", HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"ExclusiveStartTableName"}, - OutputTokens: []string{"LastEvaluatedTableName"}, - LimitToken: "Limit", - TruncationToken: "", - }, } if input == nil { - input = &ListTablesInput{} + input = &ListBackupsInput{} } - output = &ListTablesOutput{} + output = &ListBackupsOutput{} req = c.newRequest(op, input, output) // if custom endpoint for the request is set to a non empty string, // we skip the endpoint discovery workflow. @@ -3386,82 +3621,178 @@ func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Reque return } -// ListTables API operation for Amazon DynamoDB. +// ListBackups API operation for Amazon DynamoDB. // -// Returns an array of table names associated with the current account and endpoint. -// The output from ListTables is paginated, with each page returning a maximum -// of 100 table names. +// List backups associated with an AWS account. To list backups for a given +// table, specify TableName. ListBackups returns a paginated list of results +// with at most 1 MB worth of items in a page. You can also specify a maximum +// number of entries to be returned in a page. +// +// In the request, start time is inclusive, but end time is exclusive. 
Note +// that these boundaries are for the time at which the original backup was requested. +// +// You can call ListBackups a maximum of five times per second. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation ListTables for usage and error information. +// API operation ListBackups for usage and error information. // // Returned Error Types: // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables -func (c *DynamoDB) ListTables(input *ListTablesInput) (*ListTablesOutput, error) { - req, out := c.ListTablesRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListBackups +func (c *DynamoDB) ListBackups(input *ListBackupsInput) (*ListBackupsOutput, error) { + req, out := c.ListBackupsRequest(input) return out, req.Send() } -// ListTablesWithContext is the same as ListTables with the addition of +// ListBackupsWithContext is the same as ListBackups with the addition of // the ability to pass a context and additional request options. // -// See ListTables for details on how to use this API operation. +// See ListBackups for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) ListTablesWithContext(ctx aws.Context, input *ListTablesInput, opts ...request.Option) (*ListTablesOutput, error) { - req, out := c.ListTablesRequest(input) +func (c *DynamoDB) ListBackupsWithContext(ctx aws.Context, input *ListBackupsInput, opts ...request.Option) (*ListBackupsOutput, error) { + req, out := c.ListBackupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ListTablesPages iterates over the pages of a ListTables operation, +const opListContributorInsights = "ListContributorInsights" + +// ListContributorInsightsRequest generates a "aws/request.Request" representing the +// client's request for the ListContributorInsights operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListContributorInsights for more information on using the ListContributorInsights +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the ListContributorInsightsRequest method. 
+// req, resp := client.ListContributorInsightsRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListContributorInsights +func (c *DynamoDB) ListContributorInsightsRequest(input *ListContributorInsightsInput) (req *request.Request, output *ListContributorInsightsOutput) { + op := &request.Operation{ + Name: opListContributorInsights, + HTTPMethod: "POST", + HTTPPath: "/", + Paginator: &request.Paginator{ + InputTokens: []string{"NextToken"}, + OutputTokens: []string{"NextToken"}, + LimitToken: "MaxResults", + TruncationToken: "", + }, + } + + if input == nil { + input = &ListContributorInsightsInput{} + } + + output = &ListContributorInsightsOutput{} + req = c.newRequest(op, input, output) + return +} + +// ListContributorInsights API operation for Amazon DynamoDB. +// +// Returns a list of ContributorInsightsSummary for a table and all its global +// secondary indexes. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation ListContributorInsights for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListContributorInsights +func (c *DynamoDB) ListContributorInsights(input *ListContributorInsightsInput) (*ListContributorInsightsOutput, error) { + req, out := c.ListContributorInsightsRequest(input) + return out, req.Send() +} + +// ListContributorInsightsWithContext is the same as ListContributorInsights with the addition of +// the ability to pass a context and additional request options. +// +// See ListContributorInsights for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ListContributorInsightsWithContext(ctx aws.Context, input *ListContributorInsightsInput, opts ...request.Option) (*ListContributorInsightsOutput, error) { + req, out := c.ListContributorInsightsRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// ListContributorInsightsPages iterates over the pages of a ListContributorInsights operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See ListTables method for more information on how to use this operation. +// See ListContributorInsights method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a ListTables operation. +// // Example iterating over at most 3 pages of a ListContributorInsights operation. 
// pageNum := 0 -// err := client.ListTablesPages(params, -// func(page *dynamodb.ListTablesOutput, lastPage bool) bool { +// err := client.ListContributorInsightsPages(params, +// func(page *dynamodb.ListContributorInsightsOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *DynamoDB) ListTablesPages(input *ListTablesInput, fn func(*ListTablesOutput, bool) bool) error { - return c.ListTablesPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *DynamoDB) ListContributorInsightsPages(input *ListContributorInsightsInput, fn func(*ListContributorInsightsOutput, bool) bool) error { + return c.ListContributorInsightsPagesWithContext(aws.BackgroundContext(), input, fn) } -// ListTablesPagesWithContext same as ListTablesPages except +// ListContributorInsightsPagesWithContext same as ListContributorInsightsPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) ListTablesPagesWithContext(ctx aws.Context, input *ListTablesInput, fn func(*ListTablesOutput, bool) bool, opts ...request.Option) error { +func (c *DynamoDB) ListContributorInsightsPagesWithContext(ctx aws.Context, input *ListContributorInsightsInput, fn func(*ListContributorInsightsOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ListTablesInput + var inCpy *ListContributorInsightsInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ListTablesRequest(inCpy) + req, _ := c.ListContributorInsightsRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -3469,7 +3800,7 @@ func (c *DynamoDB) ListTablesPagesWithContext(ctx aws.Context, input *ListTables } for p.Next() { - if !fn(p.Page().(*ListTablesOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListContributorInsightsOutput), !p.HasNextPage()) { break } } @@ -3477,156 +3808,195 @@ func (c *DynamoDB) ListTablesPagesWithContext(ctx aws.Context, input *ListTables return p.Err() } -const opListTagsOfResource = "ListTagsOfResource" +const opListExports = "ListExports" -// ListTagsOfResourceRequest generates a "aws/request.Request" representing the -// client's request for the ListTagsOfResource operation. The "output" return +// ListExportsRequest generates a "aws/request.Request" representing the +// client's request for the ListExports operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See ListTagsOfResource for more information on using the ListTagsOfResource +// See ListExports for more information on using the ListExports // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ListTagsOfResourceRequest method. -// req, resp := client.ListTagsOfResourceRequest(params) +// // Example sending a request using the ListExportsRequest method. 
+// req, resp := client.ListExportsRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource
-func (c *DynamoDB) ListTagsOfResourceRequest(input *ListTagsOfResourceInput) (req *request.Request, output *ListTagsOfResourceOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports
+func (c *DynamoDB) ListExportsRequest(input *ListExportsInput) (req *request.Request, output *ListExportsOutput) {
	op := &request.Operation{
-		Name:       opListTagsOfResource,
+		Name:       opListExports,
		HTTPMethod: "POST",
		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"NextToken"},
+			OutputTokens:    []string{"NextToken"},
+			LimitToken:      "MaxResults",
+			TruncationToken: "",
+		},
	}

	if input == nil {
-		input = &ListTagsOfResourceInput{}
+		input = &ListExportsInput{}
	}

-	output = &ListTagsOfResourceOutput{}
+	output = &ListExportsOutput{}
	req = c.newRequest(op, input, output)
-	// if custom endpoint for the request is set to a non empty string,
-	// we skip the endpoint discovery workflow.
-	if req.Config.Endpoint == nil || *req.Config.Endpoint == "" {
-		if aws.BoolValue(req.Config.EnableEndpointDiscovery) {
-			de := discovererDescribeEndpoints{
-				Required:      false,
-				EndpointCache: c.endpointCache,
-				Params: map[string]*string{
-					"op": aws.String(req.Operation.Name),
-				},
-				Client: c,
-			}
-
-			for k, v := range de.Params {
-				if v == nil {
-					delete(de.Params, k)
-				}
-			}
-
-			req.Handlers.Build.PushFrontNamed(request.NamedHandler{
-				Name: "crr.endpointdiscovery",
-				Fn:   de.Handler,
-			})
-		}
-	}
	return
}

-// ListTagsOfResource API operation for Amazon DynamoDB.
-//
-// List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource
-// up to 10 times per second, per account.
+// ListExports API operation for Amazon DynamoDB.
//
-// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html)
-// in the Amazon DynamoDB Developer Guide.
+// Lists completed exports within the past 90 days.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon DynamoDB's
-// API operation ListTagsOfResource for usage and error information.
+// API operation ListExports for usage and error information.
//
// Returned Error Types:
-// * ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
+// * LimitExceededException
+// There is no limit to the number of daily on-demand backups that can be taken.
+//
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
+//
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
+//
+// There is a soft account quota of 256 tables.
//
// * InternalServerError
// An error occurred on the server side.
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource -func (c *DynamoDB) ListTagsOfResource(input *ListTagsOfResourceInput) (*ListTagsOfResourceOutput, error) { - req, out := c.ListTagsOfResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListExports +func (c *DynamoDB) ListExports(input *ListExportsInput) (*ListExportsOutput, error) { + req, out := c.ListExportsRequest(input) return out, req.Send() } -// ListTagsOfResourceWithContext is the same as ListTagsOfResource with the addition of +// ListExportsWithContext is the same as ListExports with the addition of // the ability to pass a context and additional request options. // -// See ListTagsOfResource for details on how to use this API operation. +// See ListExports for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) ListTagsOfResourceWithContext(ctx aws.Context, input *ListTagsOfResourceInput, opts ...request.Option) (*ListTagsOfResourceOutput, error) { - req, out := c.ListTagsOfResourceRequest(input) +func (c *DynamoDB) ListExportsWithContext(ctx aws.Context, input *ListExportsInput, opts ...request.Option) (*ListExportsOutput, error) { + req, out := c.ListExportsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opPutItem = "PutItem" - -// PutItemRequest generates a "aws/request.Request" representing the -// client's request for the PutItem operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. +// ListExportsPages iterates over the pages of a ListExports operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. // -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. +// See ListExports method for more information on how to use this operation. // -// See PutItem for more information on using the PutItem -// API call, and error handling. +// Note: This operation can generate multiple requests to a service. // -// This method is useful when you want to inject custom logic or configuration +// // Example iterating over at most 3 pages of a ListExports operation. +// pageNum := 0 +// err := client.ListExportsPages(params, +// func(page *dynamodb.ListExportsOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DynamoDB) ListExportsPages(input *ListExportsInput, fn func(*ListExportsOutput, bool) bool) error { + return c.ListExportsPagesWithContext(aws.BackgroundContext(), input, fn) +} + +// ListExportsPagesWithContext same as ListExportsPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. 
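Since ListExportsPagesWithContext is documented just above, a short sketch of how the paginator and a Context interact may help; the timeout value and client setup are assumptions for illustration.

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := dynamodb.New(sess)

	// Bound the whole pagination loop; the paginator reuses ctx for every request it makes.
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	err := svc.ListExportsPagesWithContext(ctx, &dynamodb.ListExportsInput{},
		func(page *dynamodb.ListExportsOutput, lastPage bool) bool {
			fmt.Println(len(page.ExportSummaries), "exports on this page; last page:", lastPage)
			return true // keep paging until the service reports the last page
		})
	if err != nil {
		log.Fatal(err)
	}
}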
+func (c *DynamoDB) ListExportsPagesWithContext(ctx aws.Context, input *ListExportsInput, fn func(*ListExportsOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ListExportsInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ListExportsRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ListExportsOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opListGlobalTables = "ListGlobalTables" + +// ListGlobalTablesRequest generates a "aws/request.Request" representing the +// client's request for the ListGlobalTables operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See ListGlobalTables for more information on using the ListGlobalTables +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the PutItemRequest method. -// req, resp := client.PutItemRequest(params) +// // Example sending a request using the ListGlobalTablesRequest method. +// req, resp := client.ListGlobalTablesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem -func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, output *PutItemOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables +func (c *DynamoDB) ListGlobalTablesRequest(input *ListGlobalTablesInput) (req *request.Request, output *ListGlobalTablesOutput) { op := &request.Operation{ - Name: opPutItem, + Name: opListGlobalTables, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &PutItemInput{} + input = &ListGlobalTablesInput{} } - output = &PutItemOutput{} + output = &ListGlobalTablesOutput{} req = c.newRequest(op, input, output) // if custom endpoint for the request is set to a non empty string, // we skip the endpoint discovery workflow. @@ -3656,163 +4026,90 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou return } -// PutItem API operation for Amazon DynamoDB. -// -// Creates a new item, or replaces an old item with a new item. If an item that -// has the same primary key as the new item already exists in the specified -// table, the new item completely replaces the existing item. You can perform -// a conditional put operation (add a new item if one with the specified primary -// key doesn't exist), or replace an existing item if it has certain attribute -// values. You can return the item's attribute values in the same operation, -// using the ReturnValues parameter. -// -// This topic provides general information about the PutItem API. 
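The Request form returned by ListGlobalTablesRequest is where custom handlers or headers would be injected before Send, as the doc comment above describes. A minimal sketch under assumed client setup, with the Region filter as a placeholder:

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := dynamodb.New(sess)

	// Build the request without sending it; out is only valid after Send returns nil.
	req, out := svc.ListGlobalTablesRequest(&dynamodb.ListGlobalTablesInput{
		RegionName: aws.String("us-east-1"), // placeholder Region filter
	})
	// Custom logic could be attached here, e.g. req.HTTPRequest.Header.Set(...).
	if err := req.Send(); err != nil {
		log.Fatal(err)
	}
	for _, gt := range out.GlobalTables {
		fmt.Println(aws.StringValue(gt.GlobalTableName), len(gt.ReplicationGroup), "replicas")
	}
}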
-// -// For information on how to call the PutItem API using the AWS SDK in specific -// languages, see the following: -// -// * PutItem in the AWS Command Line Interface (http://docs.aws.amazon.com/goto/aws-cli/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the AWS SDK for .NET (http://docs.aws.amazon.com/goto/DotNetSDKV3/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the AWS SDK for C++ (http://docs.aws.amazon.com/goto/SdkForCpp/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the AWS SDK for Go (http://docs.aws.amazon.com/goto/SdkForGoV1/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the AWS SDK for Java (http://docs.aws.amazon.com/goto/SdkForJava/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the AWS SDK for JavaScript (http://docs.aws.amazon.com/goto/AWSJavaScriptSDK/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the AWS SDK for PHP V3 (http://docs.aws.amazon.com/goto/SdkForPHPV3/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the AWS SDK for Python (http://docs.aws.amazon.com/goto/boto3/dynamodb-2012-08-10/PutItem) -// -// * PutItem in the AWS SDK for Ruby V2 (http://docs.aws.amazon.com/goto/SdkForRubyV2/dynamodb-2012-08-10/PutItem) -// -// When you add an item, the primary key attributes are the only required attributes. -// Attribute values cannot be null. -// -// Empty String and Binary attribute values are allowed. Attribute values of -// type String and Binary must have a length greater than zero if the attribute -// is used as a key attribute for a table or index. Set type attributes cannot -// be empty. -// -// Invalid Requests with empty values will be rejected with a ValidationException -// exception. +// ListGlobalTables API operation for Amazon DynamoDB. // -// To prevent a new item from replacing an existing item, use a conditional -// expression that contains the attribute_not_exists function with the name -// of the attribute being used as the partition key for the table. Since every -// record must contain that attribute, the attribute_not_exists function will -// only succeed if no matching item exists. +// Lists all global tables that have a replica in the specified Region. // -// For more information about PutItem, see Working with Items (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html) -// in the Amazon DynamoDB Developer Guide. +// This operation only applies to Version 2017.11.29 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V1.html) +// of global tables. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation PutItem for usage and error information. +// API operation ListGlobalTables for usage and error information. // // Returned Error Types: -// * ConditionalCheckFailedException -// A condition specified in the operation could not be evaluated. -// -// * ProvisionedThroughputExceededException -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. 
For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// * ItemCollectionSizeLimitExceededException -// An item collection is too large. This exception is only returned for tables -// that have one or more local secondary indexes. -// -// * TransactionConflictException -// Operation was rejected because there is an ongoing transaction for the item. -// -// * RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request -// a quota increase. -// // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem -func (c *DynamoDB) PutItem(input *PutItemInput) (*PutItemOutput, error) { - req, out := c.PutItemRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListGlobalTables +func (c *DynamoDB) ListGlobalTables(input *ListGlobalTablesInput) (*ListGlobalTablesOutput, error) { + req, out := c.ListGlobalTablesRequest(input) return out, req.Send() } -// PutItemWithContext is the same as PutItem with the addition of +// ListGlobalTablesWithContext is the same as ListGlobalTables with the addition of // the ability to pass a context and additional request options. // -// See PutItem for details on how to use this API operation. +// See ListGlobalTables for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) PutItemWithContext(ctx aws.Context, input *PutItemInput, opts ...request.Option) (*PutItemOutput, error) { - req, out := c.PutItemRequest(input) +func (c *DynamoDB) ListGlobalTablesWithContext(ctx aws.Context, input *ListGlobalTablesInput, opts ...request.Option) (*ListGlobalTablesOutput, error) { + req, out := c.ListGlobalTablesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opQuery = "Query" +const opListTables = "ListTables" -// QueryRequest generates a "aws/request.Request" representing the -// client's request for the Query operation. The "output" return +// ListTablesRequest generates a "aws/request.Request" representing the +// client's request for the ListTables operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See Query for more information on using the Query +// See ListTables for more information on using the ListTables // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the QueryRequest method. 
-// req, resp := client.QueryRequest(params) +// // Example sending a request using the ListTablesRequest method. +// req, resp := client.ListTablesRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query -func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output *QueryOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables +func (c *DynamoDB) ListTablesRequest(input *ListTablesInput) (req *request.Request, output *ListTablesOutput) { op := &request.Operation{ - Name: opQuery, + Name: opListTables, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ - InputTokens: []string{"ExclusiveStartKey"}, - OutputTokens: []string{"LastEvaluatedKey"}, + InputTokens: []string{"ExclusiveStartTableName"}, + OutputTokens: []string{"LastEvaluatedTableName"}, LimitToken: "Limit", TruncationToken: "", }, } if input == nil { - input = &QueryInput{} + input = &ListTablesInput{} } - output = &QueryOutput{} + output = &ListTablesOutput{} req = c.newRequest(op, input, output) // if custom endpoint for the request is set to a non empty string, // we skip the endpoint discovery workflow. @@ -3842,144 +4139,82 @@ func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output return } -// Query API operation for Amazon DynamoDB. -// -// The Query operation finds items based on primary key values. You can query -// any table or secondary index that has a composite primary key (a partition -// key and a sort key). -// -// Use the KeyConditionExpression parameter to provide a specific value for -// the partition key. The Query operation will return all of the items from -// the table or index with that partition key value. You can optionally narrow -// the scope of the Query operation by specifying a sort key value and a comparison -// operator in KeyConditionExpression. To further refine the Query results, -// you can optionally provide a FilterExpression. A FilterExpression determines -// which items within the results should be returned to you. All of the other -// results are discarded. -// -// A Query operation always returns a result set. If no matching items are found, -// the result set will be empty. Queries that do not return results consume -// the minimum number of read capacity units for that type of read operation. -// -// DynamoDB calculates the number of read capacity units consumed based on item -// size, not on the amount of data that is returned to an application. The number -// of capacity units consumed will be the same whether you request all of the -// attributes (the default behavior) or just some of them (using a projection -// expression). The number will also be the same whether or not you use a FilterExpression. -// -// Query results are always sorted by the sort key value. If the data type of -// the sort key is Number, the results are returned in numeric order; otherwise, -// the results are returned in order of UTF-8 bytes. By default, the sort order -// is ascending. To reverse the order, set the ScanIndexForward parameter to -// false. -// -// A single Query operation will read up to the maximum number of items set -// (if using the Limit parameter) or a maximum of 1 MB of data and then apply -// any filtering to the results using FilterExpression. If LastEvaluatedKey -// is present in the response, you will need to paginate the result set. 
For -// more information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination) -// in the Amazon DynamoDB Developer Guide. -// -// FilterExpression is applied after a Query finishes, but before the results -// are returned. A FilterExpression cannot contain partition key or sort key -// attributes. You need to specify those attributes in the KeyConditionExpression. -// -// A Query operation can return an empty result set and a LastEvaluatedKey if -// all the items read for the page of results are filtered out. +// ListTables API operation for Amazon DynamoDB. // -// You can query a table, a local secondary index, or a global secondary index. -// For a query on a table or on a local secondary index, you can set the ConsistentRead -// parameter to true and obtain a strongly consistent result. Global secondary -// indexes support eventually consistent reads only, so do not specify ConsistentRead -// when querying a global secondary index. +// Returns an array of table names associated with the current account and endpoint. +// The output from ListTables is paginated, with each page returning a maximum +// of 100 table names. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation Query for usage and error information. +// API operation ListTables for usage and error information. // // Returned Error Types: -// * ProvisionedThroughputExceededException -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. -// -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// * RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request -// a quota increase. -// // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query -func (c *DynamoDB) Query(input *QueryInput) (*QueryOutput, error) { - req, out := c.QueryRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTables +func (c *DynamoDB) ListTables(input *ListTablesInput) (*ListTablesOutput, error) { + req, out := c.ListTablesRequest(input) return out, req.Send() } -// QueryWithContext is the same as Query with the addition of +// ListTablesWithContext is the same as ListTables with the addition of // the ability to pass a context and additional request options. // -// See Query for details on how to use this API operation. +// See ListTables for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) QueryWithContext(ctx aws.Context, input *QueryInput, opts ...request.Option) (*QueryOutput, error) { - req, out := c.QueryRequest(input) +func (c *DynamoDB) ListTablesWithContext(ctx aws.Context, input *ListTablesInput, opts ...request.Option) (*ListTablesOutput, error) { + req, out := c.ListTablesRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// QueryPages iterates over the pages of a Query operation, +// ListTablesPages iterates over the pages of a ListTables operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See Query method for more information on how to use this operation. +// See ListTables method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a Query operation. +// // Example iterating over at most 3 pages of a ListTables operation. // pageNum := 0 -// err := client.QueryPages(params, -// func(page *dynamodb.QueryOutput, lastPage bool) bool { +// err := client.ListTablesPages(params, +// func(page *dynamodb.ListTablesOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *DynamoDB) QueryPages(input *QueryInput, fn func(*QueryOutput, bool) bool) error { - return c.QueryPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *DynamoDB) ListTablesPages(input *ListTablesInput, fn func(*ListTablesOutput, bool) bool) error { + return c.ListTablesPagesWithContext(aws.BackgroundContext(), input, fn) } -// QueryPagesWithContext same as QueryPages except +// ListTablesPagesWithContext same as ListTablesPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) QueryPagesWithContext(ctx aws.Context, input *QueryInput, fn func(*QueryOutput, bool) bool, opts ...request.Option) error { +func (c *DynamoDB) ListTablesPagesWithContext(ctx aws.Context, input *ListTablesInput, fn func(*ListTablesOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *QueryInput + var inCpy *ListTablesInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.QueryRequest(inCpy) + req, _ := c.ListTablesRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -3987,7 +4222,7 @@ func (c *DynamoDB) QueryPagesWithContext(ctx aws.Context, input *QueryInput, fn } for p.Next() { - if !fn(p.Page().(*QueryOutput), !p.HasNextPage()) { + if !fn(p.Page().(*ListTablesOutput), !p.HasNextPage()) { break } } @@ -3995,44 +4230,44 @@ func (c *DynamoDB) QueryPagesWithContext(ctx aws.Context, input *QueryInput, fn return p.Err() } -const opRestoreTableFromBackup = "RestoreTableFromBackup" +const opListTagsOfResource = "ListTagsOfResource" -// RestoreTableFromBackupRequest generates a "aws/request.Request" representing the -// client's request for the RestoreTableFromBackup operation. 
The "output" return +// ListTagsOfResourceRequest generates a "aws/request.Request" representing the +// client's request for the ListTagsOfResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RestoreTableFromBackup for more information on using the RestoreTableFromBackup +// See ListTagsOfResource for more information on using the ListTagsOfResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RestoreTableFromBackupRequest method. -// req, resp := client.RestoreTableFromBackupRequest(params) +// // Example sending a request using the ListTagsOfResourceRequest method. +// req, resp := client.ListTagsOfResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup -func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupInput) (req *request.Request, output *RestoreTableFromBackupOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource +func (c *DynamoDB) ListTagsOfResourceRequest(input *ListTagsOfResourceInput) (req *request.Request, output *ListTagsOfResourceOutput) { op := &request.Operation{ - Name: opRestoreTableFromBackup, + Name: opListTagsOfResource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &RestoreTableFromBackupInput{} + input = &ListTagsOfResourceInput{} } - output = &RestoreTableFromBackupOutput{} + output = &ListTagsOfResourceOutput{} req = c.newRequest(op, input, output) // if custom endpoint for the request is set to a non empty string, // we skip the endpoint discovery workflow. @@ -4062,125 +4297,89 @@ func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupIn return } -// RestoreTableFromBackup API operation for Amazon DynamoDB. -// -// Creates a new table from an existing backup. Any number of users can execute -// up to 4 concurrent restores (any type of restore) in a given account. -// -// You can call RestoreTableFromBackup at a maximum rate of 10 times per second. -// -// You must manually set up the following on the restored table: -// -// * Auto scaling policies -// -// * IAM policies -// -// * Amazon CloudWatch metrics and alarms -// -// * Tags +// ListTagsOfResource API operation for Amazon DynamoDB. // -// * Stream settings +// List all tags on an Amazon DynamoDB resource. You can call ListTagsOfResource +// up to 10 times per second, per account. // -// * Time to Live (TTL) settings +// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) +// in the Amazon DynamoDB Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation RestoreTableFromBackup for usage and error information. 
+// API operation ListTagsOfResource for usage and error information. // // Returned Error Types: -// * TableAlreadyExistsException -// A target table with the specified name already exists. -// -// * TableInUseException -// A target table with the specified name is either being created or deleted. -// -// * BackupNotFoundException -// Backup not found for the given BackupARN. -// -// * BackupInUseException -// There is another ongoing conflicting backup control plane operation on the -// table. The backup is either being created, deleted or restored to a table. -// -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// Up to 50 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. -// -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. -// -// There is a soft account quota of 256 tables. +// * ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. // // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup -func (c *DynamoDB) RestoreTableFromBackup(input *RestoreTableFromBackupInput) (*RestoreTableFromBackupOutput, error) { - req, out := c.RestoreTableFromBackupRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/ListTagsOfResource +func (c *DynamoDB) ListTagsOfResource(input *ListTagsOfResourceInput) (*ListTagsOfResourceOutput, error) { + req, out := c.ListTagsOfResourceRequest(input) return out, req.Send() } -// RestoreTableFromBackupWithContext is the same as RestoreTableFromBackup with the addition of +// ListTagsOfResourceWithContext is the same as ListTagsOfResource with the addition of // the ability to pass a context and additional request options. // -// See RestoreTableFromBackup for details on how to use this API operation. +// See ListTagsOfResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) RestoreTableFromBackupWithContext(ctx aws.Context, input *RestoreTableFromBackupInput, opts ...request.Option) (*RestoreTableFromBackupOutput, error) { - req, out := c.RestoreTableFromBackupRequest(input) +func (c *DynamoDB) ListTagsOfResourceWithContext(ctx aws.Context, input *ListTagsOfResourceInput, opts ...request.Option) (*ListTagsOfResourceOutput, error) { + req, out := c.ListTagsOfResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opRestoreTableToPointInTime = "RestoreTableToPointInTime" +const opPutItem = "PutItem" -// RestoreTableToPointInTimeRequest generates a "aws/request.Request" representing the -// client's request for the RestoreTableToPointInTime operation. 
The "output" return +// PutItemRequest generates a "aws/request.Request" representing the +// client's request for the PutItem operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See RestoreTableToPointInTime for more information on using the RestoreTableToPointInTime +// See PutItem for more information on using the PutItem // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the RestoreTableToPointInTimeRequest method. -// req, resp := client.RestoreTableToPointInTimeRequest(params) +// // Example sending a request using the PutItemRequest method. +// req, resp := client.PutItemRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime -func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointInTimeInput) (req *request.Request, output *RestoreTableToPointInTimeOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem +func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, output *PutItemOutput) { op := &request.Operation{ - Name: opRestoreTableToPointInTime, + Name: opPutItem, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &RestoreTableToPointInTimeInput{} + input = &PutItemInput{} } - output = &RestoreTableToPointInTimeOutput{} + output = &PutItemOutput{} req = c.newRequest(op, input, output) // if custom endpoint for the request is set to a non empty string, // we skip the endpoint discovery workflow. @@ -4210,138 +4409,148 @@ func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointIn return } -// RestoreTableToPointInTime API operation for Amazon DynamoDB. +// PutItem API operation for Amazon DynamoDB. // -// Restores the specified table to the specified point in time within EarliestRestorableDateTime -// and LatestRestorableDateTime. You can restore your table to any point in -// time during the last 35 days. Any number of users can execute up to 4 concurrent -// restores (any type of restore) in a given account. +// Creates a new item, or replaces an old item with a new item. If an item that +// has the same primary key as the new item already exists in the specified +// table, the new item completely replaces the existing item. You can perform +// a conditional put operation (add a new item if one with the specified primary +// key doesn't exist), or replace an existing item if it has certain attribute +// values. You can return the item's attribute values in the same operation, +// using the ReturnValues parameter. // -// When you restore using point in time recovery, DynamoDB restores your table -// data to the state based on the selected date and time (day:hour:minute:second) -// to a new table. +// This topic provides general information about the PutItem API. 
// -// Along with data, the following are also included on the new restored table -// using point in time recovery: +// For information on how to call the PutItem API using the AWS SDK in specific +// languages, see the following: // -// * Global secondary indexes (GSIs) +// * PutItem in the AWS Command Line Interface (http://docs.aws.amazon.com/goto/aws-cli/dynamodb-2012-08-10/PutItem) // -// * Local secondary indexes (LSIs) +// * PutItem in the AWS SDK for .NET (http://docs.aws.amazon.com/goto/DotNetSDKV3/dynamodb-2012-08-10/PutItem) // -// * Provisioned read and write capacity +// * PutItem in the AWS SDK for C++ (http://docs.aws.amazon.com/goto/SdkForCpp/dynamodb-2012-08-10/PutItem) // -// * Encryption settings All these settings come from the current settings -// of the source table at the time of restore. +// * PutItem in the AWS SDK for Go (http://docs.aws.amazon.com/goto/SdkForGoV1/dynamodb-2012-08-10/PutItem) // -// You must manually set up the following on the restored table: +// * PutItem in the AWS SDK for Java (http://docs.aws.amazon.com/goto/SdkForJava/dynamodb-2012-08-10/PutItem) // -// * Auto scaling policies +// * PutItem in the AWS SDK for JavaScript (http://docs.aws.amazon.com/goto/AWSJavaScriptSDK/dynamodb-2012-08-10/PutItem) // -// * IAM policies +// * PutItem in the AWS SDK for PHP V3 (http://docs.aws.amazon.com/goto/SdkForPHPV3/dynamodb-2012-08-10/PutItem) // -// * Amazon CloudWatch metrics and alarms +// * PutItem in the AWS SDK for Python (http://docs.aws.amazon.com/goto/boto3/dynamodb-2012-08-10/PutItem) // -// * Tags +// * PutItem in the AWS SDK for Ruby V2 (http://docs.aws.amazon.com/goto/SdkForRubyV2/dynamodb-2012-08-10/PutItem) // -// * Stream settings +// When you add an item, the primary key attributes are the only required attributes. +// Attribute values cannot be null. // -// * Time to Live (TTL) settings +// Empty String and Binary attribute values are allowed. Attribute values of +// type String and Binary must have a length greater than zero if the attribute +// is used as a key attribute for a table or index. Set type attributes cannot +// be empty. // -// * Point in time recovery settings +// Invalid Requests with empty values will be rejected with a ValidationException +// exception. +// +// To prevent a new item from replacing an existing item, use a conditional +// expression that contains the attribute_not_exists function with the name +// of the attribute being used as the partition key for the table. Since every +// record must contain that attribute, the attribute_not_exists function will +// only succeed if no matching item exists. +// +// For more information about PutItem, see Working with Items (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html) +// in the Amazon DynamoDB Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation RestoreTableToPointInTime for usage and error information. +// API operation PutItem for usage and error information. // // Returned Error Types: -// * TableAlreadyExistsException -// A target table with the specified name already exists. +// * ConditionalCheckFailedException +// A condition specified in the operation could not be evaluated. 
// -// * TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account. +// * ProvisionedThroughputExceededException +// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry +// requests that receive this exception. Your request is eventually successful, +// unless your retry queue is too large to finish. Reduce the frequency of requests +// and use exponential backoff. For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. // -// * TableInUseException -// A target table with the specified name is either being created or deleted. +// * ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. // -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. +// * ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. // -// Up to 50 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// * TransactionConflictException +// Operation was rejected because there is an ongoing transaction for the item. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. -// -// There is a soft account quota of 256 tables. -// -// * InvalidRestoreTimeException -// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime -// and LatestRestorableDateTime. -// -// * PointInTimeRecoveryUnavailableException -// Point in time recovery has not yet been enabled for this source table. +// * RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a quota increase. // // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime -func (c *DynamoDB) RestoreTableToPointInTime(input *RestoreTableToPointInTimeInput) (*RestoreTableToPointInTimeOutput, error) { - req, out := c.RestoreTableToPointInTimeRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/PutItem +func (c *DynamoDB) PutItem(input *PutItemInput) (*PutItemOutput, error) { + req, out := c.PutItemRequest(input) return out, req.Send() } -// RestoreTableToPointInTimeWithContext is the same as RestoreTableToPointInTime with the addition of +// PutItemWithContext is the same as PutItem with the addition of // the ability to pass a context and additional request options. // -// See RestoreTableToPointInTime for details on how to use this API operation. +// See PutItem for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. 
In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) RestoreTableToPointInTimeWithContext(ctx aws.Context, input *RestoreTableToPointInTimeInput, opts ...request.Option) (*RestoreTableToPointInTimeOutput, error) { - req, out := c.RestoreTableToPointInTimeRequest(input) +func (c *DynamoDB) PutItemWithContext(ctx aws.Context, input *PutItemInput, opts ...request.Option) (*PutItemOutput, error) { + req, out := c.PutItemRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opScan = "Scan" +const opQuery = "Query" -// ScanRequest generates a "aws/request.Request" representing the -// client's request for the Scan operation. The "output" return +// QueryRequest generates a "aws/request.Request" representing the +// client's request for the Query operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See Scan for more information on using the Scan +// See Query for more information on using the Query // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the ScanRequest method. -// req, resp := client.ScanRequest(params) +// // Example sending a request using the QueryRequest method. +// req, resp := client.QueryRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan -func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *ScanOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query +func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output *QueryOutput) { op := &request.Operation{ - Name: opScan, + Name: opQuery, HTTPMethod: "POST", HTTPPath: "/", Paginator: &request.Paginator{ @@ -4353,10 +4562,10 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output * } if input == nil { - input = &ScanInput{} + input = &QueryInput{} } - output = &ScanOutput{} + output = &QueryOutput{} req = c.newRequest(op, input, output) // if custom endpoint for the request is set to a non empty string, // we skip the endpoint discovery workflow. @@ -4386,43 +4595,63 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output * return } -// Scan API operation for Amazon DynamoDB. +// Query API operation for Amazon DynamoDB. // -// The Scan operation returns one or more items and item attributes by accessing -// every item in a table or a secondary index. To have DynamoDB return fewer -// items, you can provide a FilterExpression operation. +// The Query operation finds items based on primary key values. You can query +// any table or secondary index that has a composite primary key (a partition +// key and a sort key). // -// If the total number of scanned items exceeds the maximum dataset size limit -// of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey -// value to continue the scan in a subsequent operation. 
The results also include -// the number of items exceeding the limit. A scan can result in no table data -// meeting the filter criteria. +// Use the KeyConditionExpression parameter to provide a specific value for +// the partition key. The Query operation will return all of the items from +// the table or index with that partition key value. You can optionally narrow +// the scope of the Query operation by specifying a sort key value and a comparison +// operator in KeyConditionExpression. To further refine the Query results, +// you can optionally provide a FilterExpression. A FilterExpression determines +// which items within the results should be returned to you. All of the other +// results are discarded. // -// A single Scan operation reads up to the maximum number of items set (if using -// the Limit parameter) or a maximum of 1 MB of data and then apply any filtering -// to the results using FilterExpression. If LastEvaluatedKey is present in -// the response, you need to paginate the result set. For more information, -// see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination) -// in the Amazon DynamoDB Developer Guide. +// A Query operation always returns a result set. If no matching items are found, +// the result set will be empty. Queries that do not return results consume +// the minimum number of read capacity units for that type of read operation. // -// Scan operations proceed sequentially; however, for faster performance on -// a large table or secondary index, applications can request a parallel Scan -// operation by providing the Segment and TotalSegments parameters. For more -// information, see Parallel Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan) +// DynamoDB calculates the number of read capacity units consumed based on item +// size, not on the amount of data that is returned to an application. The number +// of capacity units consumed will be the same whether you request all of the +// attributes (the default behavior) or just some of them (using a projection +// expression). The number will also be the same whether or not you use a FilterExpression. +// +// Query results are always sorted by the sort key value. If the data type of +// the sort key is Number, the results are returned in numeric order; otherwise, +// the results are returned in order of UTF-8 bytes. By default, the sort order +// is ascending. To reverse the order, set the ScanIndexForward parameter to +// false. +// +// A single Query operation will read up to the maximum number of items set +// (if using the Limit parameter) or a maximum of 1 MB of data and then apply +// any filtering to the results using FilterExpression. If LastEvaluatedKey +// is present in the response, you will need to paginate the result set. For +// more information, see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination) // in the Amazon DynamoDB Developer Guide. // -// Scan uses eventually consistent reads when accessing the data in a table; -// therefore, the result set might not include the changes to data in the table -// immediately before the operation began. If you need a consistent copy of -// the data, as of the time that the Scan begins, you can set the ConsistentRead -// parameter to true. +// FilterExpression is applied after a Query finishes, but before the results +// are returned. 
A FilterExpression cannot contain partition key or sort key +// attributes. You need to specify those attributes in the KeyConditionExpression. +// +// A Query operation can return an empty result set and a LastEvaluatedKey if +// all the items read for the page of results are filtered out. +// +// You can query a table, a local secondary index, or a global secondary index. +// For a query on a table or on a local secondary index, you can set the ConsistentRead +// parameter to true and obtain a strongly consistent result. Global secondary +// indexes support eventually consistent reads only, so do not specify ConsistentRead +// when querying a global secondary index. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation Scan for usage and error information. +// API operation Query for usage and error information. // // Returned Error Types: // * ProvisionedThroughputExceededException @@ -4445,65 +4674,65 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output * // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan -func (c *DynamoDB) Scan(input *ScanInput) (*ScanOutput, error) { - req, out := c.ScanRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Query +func (c *DynamoDB) Query(input *QueryInput) (*QueryOutput, error) { + req, out := c.QueryRequest(input) return out, req.Send() } -// ScanWithContext is the same as Scan with the addition of +// QueryWithContext is the same as Query with the addition of // the ability to pass a context and additional request options. // -// See Scan for details on how to use this API operation. +// See Query for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) ScanWithContext(ctx aws.Context, input *ScanInput, opts ...request.Option) (*ScanOutput, error) { - req, out := c.ScanRequest(input) +func (c *DynamoDB) QueryWithContext(ctx aws.Context, input *QueryInput, opts ...request.Option) (*QueryOutput, error) { + req, out := c.QueryRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// ScanPages iterates over the pages of a Scan operation, +// QueryPages iterates over the pages of a Query operation, // calling the "fn" function with the response data for each page. To stop // iterating, return false from the fn function. // -// See Scan method for more information on how to use this operation. +// See Query method for more information on how to use this operation. // // Note: This operation can generate multiple requests to a service. // -// // Example iterating over at most 3 pages of a Scan operation. +// // Example iterating over at most 3 pages of a Query operation. 
// pageNum := 0 -// err := client.ScanPages(params, -// func(page *dynamodb.ScanOutput, lastPage bool) bool { +// err := client.QueryPages(params, +// func(page *dynamodb.QueryOutput, lastPage bool) bool { // pageNum++ // fmt.Println(page) // return pageNum <= 3 // }) // -func (c *DynamoDB) ScanPages(input *ScanInput, fn func(*ScanOutput, bool) bool) error { - return c.ScanPagesWithContext(aws.BackgroundContext(), input, fn) +func (c *DynamoDB) QueryPages(input *QueryInput, fn func(*QueryOutput, bool) bool) error { + return c.QueryPagesWithContext(aws.BackgroundContext(), input, fn) } -// ScanPagesWithContext same as ScanPages except +// QueryPagesWithContext same as QueryPages except // it takes a Context and allows setting request options on the pages. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) ScanPagesWithContext(ctx aws.Context, input *ScanInput, fn func(*ScanOutput, bool) bool, opts ...request.Option) error { +func (c *DynamoDB) QueryPagesWithContext(ctx aws.Context, input *QueryInput, fn func(*QueryOutput, bool) bool, opts ...request.Option) error { p := request.Pagination{ NewRequest: func() (*request.Request, error) { - var inCpy *ScanInput + var inCpy *QueryInput if input != nil { tmp := *input inCpy = &tmp } - req, _ := c.ScanRequest(inCpy) + req, _ := c.QueryRequest(inCpy) req.SetContext(ctx) req.ApplyOptions(opts...) return req, nil @@ -4511,7 +4740,7 @@ func (c *DynamoDB) ScanPagesWithContext(ctx aws.Context, input *ScanInput, fn fu } for p.Next() { - if !fn(p.Page().(*ScanOutput), !p.HasNextPage()) { + if !fn(p.Page().(*QueryOutput), !p.HasNextPage()) { break } } @@ -4519,46 +4748,45 @@ func (c *DynamoDB) ScanPagesWithContext(ctx aws.Context, input *ScanInput, fn fu return p.Err() } -const opTagResource = "TagResource" +const opRestoreTableFromBackup = "RestoreTableFromBackup" -// TagResourceRequest generates a "aws/request.Request" representing the -// client's request for the TagResource operation. The "output" return +// RestoreTableFromBackupRequest generates a "aws/request.Request" representing the +// client's request for the RestoreTableFromBackup operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TagResource for more information on using the TagResource +// See RestoreTableFromBackup for more information on using the RestoreTableFromBackup // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TagResourceRequest method. -// req, resp := client.TagResourceRequest(params) +// // Example sending a request using the RestoreTableFromBackupRequest method. 
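To tie the Query documentation above together, a short sketch of a single-page query against a hypothetical composite-key table; KeyConditionExpression pins the partition key while the sort key is narrowed with begins_with, and a non-nil LastEvaluatedKey signals that pagination (e.g. via QueryPagesWithContext) is needed.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := dynamodb.New(sess)

	out, err := svc.Query(&dynamodb.QueryInput{
		TableName:              aws.String("Music"), // hypothetical table: Artist (partition), SongTitle (sort)
		KeyConditionExpression: aws.String("Artist = :a AND begins_with(SongTitle, :t)"),
		ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{
			":a": {S: aws.String("No One You Know")},
			":t": {S: aws.String("Call")},
		},
		ScanIndexForward: aws.Bool(false), // descending sort-key order
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("matched", len(out.Items), "items")
	if out.LastEvaluatedKey != nil {
		fmt.Println("more results remain; paginate with QueryPages")
	}
}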
+// req, resp := client.RestoreTableFromBackupRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource -func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup +func (c *DynamoDB) RestoreTableFromBackupRequest(input *RestoreTableFromBackupInput) (req *request.Request, output *RestoreTableFromBackupOutput) { op := &request.Operation{ - Name: opTagResource, + Name: opRestoreTableFromBackup, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &TagResourceInput{} + input = &RestoreTableFromBackupInput{} } - output = &TagResourceOutput{} + output = &RestoreTableFromBackupOutput{} req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) // if custom endpoint for the request is set to a non empty string, // we skip the endpoint discovery workflow. if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { @@ -4587,24 +4815,48 @@ func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Req return } -// TagResource API operation for Amazon DynamoDB. +// RestoreTableFromBackup API operation for Amazon DynamoDB. // -// Associate a set of tags with an Amazon DynamoDB resource. You can then activate -// these user-defined tags so that they appear on the Billing and Cost Management -// console for cost allocation tracking. You can call TagResource up to five -// times per second, per account. +// Creates a new table from an existing backup. Any number of users can execute +// up to 4 concurrent restores (any type of restore) in a given account. // -// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) -// in the Amazon DynamoDB Developer Guide. +// You can call RestoreTableFromBackup at a maximum rate of 10 times per second. +// +// You must manually set up the following on the restored table: +// +// * Auto scaling policies +// +// * IAM policies +// +// * Amazon CloudWatch metrics and alarms +// +// * Tags +// +// * Stream settings +// +// * Time to Live (TTL) settings // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation TagResource for usage and error information. +// API operation RestoreTableFromBackup for usage and error information. // // Returned Error Types: +// * TableAlreadyExistsException +// A target table with the specified name already exists. +// +// * TableInUseException +// A target table with the specified name is either being created or deleted. +// +// * BackupNotFoundException +// Backup not found for the given BackupARN. +// +// * BackupInUseException +// There is another ongoing conflicting backup control plane operation on the +// table. The backup is either being created, deleted or restored to a table. +// // * LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // @@ -4619,78 +4871,69 @@ func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Req // // There is a soft account quota of 256 tables. 
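A minimal sketch of RestoreTableFromBackup as documented above; the backup ARN and target table name are placeholders, and the comment restates the manual re-setup work the doc comment lists.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

func main() {
	sess := session.Must(session.NewSession(&aws.Config{Region: aws.String("us-east-1")}))
	svc := dynamodb.New(sess)

	out, err := svc.RestoreTableFromBackup(&dynamodb.RestoreTableFromBackupInput{
		// Placeholder ARN and table name.
		BackupArn:       aws.String("arn:aws:dynamodb:us-east-1:123456789012:table/Music/backup/01234567890123-abcdefgh"),
		TargetTableName: aws.String("Music-restored"),
	})
	if err != nil {
		log.Fatal(err) // e.g. TableAlreadyExistsException, BackupInUseException, LimitExceededException
	}
	// The restore is asynchronous: the new table starts in CREATING status, and
	// auto scaling, IAM policies, CloudWatch alarms, tags, stream and TTL
	// settings must all be re-applied by hand afterwards.
	fmt.Println("status:", aws.StringValue(out.TableDescription.TableStatus))
}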
// -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// // * InternalServerError // An error occurred on the server side. // -// * ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource -func (c *DynamoDB) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableFromBackup +func (c *DynamoDB) RestoreTableFromBackup(input *RestoreTableFromBackupInput) (*RestoreTableFromBackupOutput, error) { + req, out := c.RestoreTableFromBackupRequest(input) return out, req.Send() } -// TagResourceWithContext is the same as TagResource with the addition of +// RestoreTableFromBackupWithContext is the same as RestoreTableFromBackup with the addition of // the ability to pass a context and additional request options. // -// See TagResource for details on how to use this API operation. +// See RestoreTableFromBackup for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { - req, out := c.TagResourceRequest(input) +func (c *DynamoDB) RestoreTableFromBackupWithContext(ctx aws.Context, input *RestoreTableFromBackupInput, opts ...request.Option) (*RestoreTableFromBackupOutput, error) { + req, out := c.RestoreTableFromBackupRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTransactGetItems = "TransactGetItems" +const opRestoreTableToPointInTime = "RestoreTableToPointInTime" -// TransactGetItemsRequest generates a "aws/request.Request" representing the -// client's request for the TransactGetItems operation. The "output" return +// RestoreTableToPointInTimeRequest generates a "aws/request.Request" representing the +// client's request for the RestoreTableToPointInTime operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TransactGetItems for more information on using the TransactGetItems +// See RestoreTableToPointInTime for more information on using the RestoreTableToPointInTime // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TransactGetItemsRequest method. -// req, resp := client.TransactGetItemsRequest(params) +// // Example sending a request using the RestoreTableToPointInTimeRequest method. 
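//
// For illustration only, a sketch of calling RestoreTableFromBackup as defined
// above; the backup ARN and table names are placeholders, and, per the doc
// comment, auto scaling, IAM policies, tags, TTL, and stream settings must be
// re-applied to the restored table by hand.
//
//    svc := dynamodb.New(session.Must(session.NewSession()))
//    out, err := svc.RestoreTableFromBackup(&dynamodb.RestoreTableFromBackupInput{
//        BackupArn:       aws.String("arn:aws:dynamodb:us-east-1:111122223333:table/Music/backup/01234567890123-abcdefgh"),
//        TargetTableName: aws.String("MusicRestored"),
//    })
//    if err != nil {
//        log.Fatal(err) // e.g. TableAlreadyExistsException, BackupNotFoundException
//    }
//    fmt.Println(aws.StringValue(out.TableDescription.TableStatus)) // typically CREATING at first
//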
+// req, resp := client.RestoreTableToPointInTimeRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems -func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *request.Request, output *TransactGetItemsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime +func (c *DynamoDB) RestoreTableToPointInTimeRequest(input *RestoreTableToPointInTimeInput) (req *request.Request, output *RestoreTableToPointInTimeOutput) { op := &request.Operation{ - Name: opTransactGetItems, + Name: opRestoreTableToPointInTime, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &TransactGetItemsInput{} + input = &RestoreTableToPointInTimeInput{} } - output = &TransactGetItemsOutput{} + output = &RestoreTableToPointInTimeOutput{} req = c.newRequest(op, input, output) // if custom endpoint for the request is set to a non empty string, // we skip the endpoint discovery workflow. @@ -4720,204 +4963,153 @@ func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *r return } -// TransactGetItems API operation for Amazon DynamoDB. -// -// TransactGetItems is a synchronous operation that atomically retrieves multiple -// items from one or more tables (but not from indexes) in a single account -// and Region. A TransactGetItems call can contain up to 25 TransactGetItem -// objects, each of which contains a Get structure that specifies an item to -// retrieve from a table in the account and Region. A call to TransactGetItems -// cannot retrieve items from tables in more than one AWS account or Region. -// The aggregate size of the items in the transaction cannot exceed 4 MB. -// -// DynamoDB rejects the entire TransactGetItems request if any of the following -// is true: -// -// * A conflicting operation is in the process of updating an item to be -// read. -// -// * There is insufficient provisioned capacity for the transaction to be -// completed. -// -// * There is a user error, such as an invalid data format. -// -// * The aggregate size of the items in the transaction cannot exceed 4 MB. -// -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. +// RestoreTableToPointInTime API operation for Amazon DynamoDB. // -// See the AWS API reference guide for Amazon DynamoDB's -// API operation TransactGetItems for usage and error information. +// Restores the specified table to the specified point in time within EarliestRestorableDateTime +// and LatestRestorableDateTime. You can restore your table to any point in +// time during the last 35 days. Any number of users can execute up to 4 concurrent +// restores (any type of restore) in a given account. // -// Returned Error Types: -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. +// When you restore using point in time recovery, DynamoDB restores your table +// data to the state based on the selected date and time (day:hour:minute:second) +// to a new table. // -// * TransactionCanceledException -// The entire transaction request was canceled. 
+// Along with data, the following are also included on the new restored table
+// using point in time recovery:
//
-// DynamoDB cancels a TransactWriteItems request under the following circumstances:
+// * Global secondary indexes (GSIs)
//
-// * A condition in one of the condition expressions is not met.
+// * Local secondary indexes (LSIs)
//
-// * A table in the TransactWriteItems request is in a different account
-// or region.
+// * Provisioned read and write capacity
//
-// * More than one action in the TransactWriteItems operation targets the
-// same item.
+// * Encryption settings. All these settings come from the current settings
+// of the source table at the time of restore.
//
-// * There is insufficient provisioned capacity for the transaction to be
-// completed.
+// You must manually set up the following on the restored table:
//
-// * An item size becomes too large (larger than 400 KB), or a local secondary
-// index (LSI) becomes too large, or a similar validation error occurs because
-// of changes made by the transaction.
+// * Auto scaling policies
//
-// * There is a user error, such as an invalid data format.
+// * IAM policies
//
-// DynamoDB cancels a TransactGetItems request under the following circumstances:
+// * Amazon CloudWatch metrics and alarms
//
-// * There is an ongoing TransactGetItems operation that conflicts with a
-// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
-// In this case the TransactGetItems operation fails with a TransactionCanceledException.
+// * Tags
//
-// * A table in the TransactGetItems request is in a different account or
-// region.
+// * Stream settings
//
-// * There is insufficient provisioned capacity for the transaction to be
-// completed.
+// * Time to Live (TTL) settings
//
-// * There is a user error, such as an invalid data format.
+// * Point in time recovery settings
//
-// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
-// property. This property is not set for other languages. Transaction cancellation
-// reasons are ordered in the order of requested items, if an item has no error
-// it will have NONE code and Null message.
+// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
+// with awserr.Error's Code and Message methods to get detailed information about
+// the error.
//
-// Cancellation reason codes and possible error messages:
+// See the AWS API reference guide for Amazon DynamoDB's
+// API operation RestoreTableToPointInTime for usage and error information.
//
-// * No Errors: Code: NONE Message: null
+// Returned Error Types:
+// * TableAlreadyExistsException
+// A target table with the specified name already exists.
//
-// * Conditional Check Failed: Code: ConditionalCheckFailed Message: The
-// conditional request failed.
+// * TableNotFoundException
+// A source table with the name TableName does not currently exist within the
+// subscriber's account.
//
-// * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
-// Message: Collection size exceeded.
+// * TableInUseException
+// A target table with the specified name is either being created or deleted.
//
-// * Transaction Conflict: Code: TransactionConflict Message: Transaction
-// is ongoing for the item.
+// * LimitExceededException
+// There is no limit to the number of daily on-demand backups that can be taken.
//
-// * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
-// Messages: The level of configured provisioned throughput for the table
-// was exceeded. Consider increasing your provisioning level with the UpdateTable
-// API. This Message is received when provisioned throughput is exceeded
-// is on a provisioned DynamoDB table. The level of configured provisioned
-// throughput for one or more global secondary indexes of the table was exceeded.
-// Consider increasing your provisioning level for the under-provisioned
-// global secondary indexes with the UpdateTable API. This message is returned
-// when provisioned throughput is exceeded is on a provisioned GSI.
+// Up to 50 simultaneous table operations are allowed per account. These operations
+// include CreateTable, UpdateTable, DeleteTable, UpdateTimeToLive, RestoreTableFromBackup,
+// and RestoreTableToPointInTime.
//
-// * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
-// the current capacity of your table or index. DynamoDB is automatically
-// scaling your table or index so please try again shortly. If exceptions
-// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
-// This message is returned when writes get throttled on an On-Demand table
-// as DynamoDB is automatically scaling the table. Throughput exceeds the
-// current capacity for one or more global secondary indexes. DynamoDB is
-// automatically scaling your index so please try again shortly. This message
-// is returned when when writes get throttled on an On-Demand GSI as DynamoDB
-// is automatically scaling the GSI.
+// The only exception is when you are creating a table with one or more secondary
+// indexes. You can have up to 25 such requests running at a time; however,
+// if the table or index specifications are complex, DynamoDB might temporarily
+// reduce the number of concurrent operations.
//
-// * Validation Error: Code: ValidationError Messages: One or more parameter
-// values were invalid. The update expression attempted to update the secondary
-// index key beyond allowed size limits. The update expression attempted
-// to update the secondary index key to unsupported type. An operand in the
-// update expression has an incorrect data type. Item size to update has
-// exceeded the maximum allowed size. Number overflow. Attempting to store
-// a number with magnitude larger than supported range. Type mismatch for
-// attribute to update. Nesting Levels have exceeded supported limits. The
-// document path provided in the update expression is invalid for update.
-// The provided expression refers to an attribute that does not exist in
-// the item.
+// There is a soft account quota of 256 tables.
//
-// * ProvisionedThroughputExceededException
-// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
-// requests that receive this exception. Your request is eventually successful,
-// unless your retry queue is too large to finish. Reduce the frequency of requests
-// and use exponential backoff. For more information, go to Error Retries and
-// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff)
-// in the Amazon DynamoDB Developer Guide.
+// * InvalidRestoreTimeException
+// An invalid restore time was specified. RestoreDateTime must be between EarliestRestorableDateTime
+// and LatestRestorableDateTime.
// -// * RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request -// a quota increase. +// * PointInTimeRecoveryUnavailableException +// Point in time recovery has not yet been enabled for this source table. // // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems -func (c *DynamoDB) TransactGetItems(input *TransactGetItemsInput) (*TransactGetItemsOutput, error) { - req, out := c.TransactGetItemsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/RestoreTableToPointInTime +func (c *DynamoDB) RestoreTableToPointInTime(input *RestoreTableToPointInTimeInput) (*RestoreTableToPointInTimeOutput, error) { + req, out := c.RestoreTableToPointInTimeRequest(input) return out, req.Send() } -// TransactGetItemsWithContext is the same as TransactGetItems with the addition of +// RestoreTableToPointInTimeWithContext is the same as RestoreTableToPointInTime with the addition of // the ability to pass a context and additional request options. // -// See TransactGetItems for details on how to use this API operation. +// See RestoreTableToPointInTime for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) TransactGetItemsWithContext(ctx aws.Context, input *TransactGetItemsInput, opts ...request.Option) (*TransactGetItemsOutput, error) { - req, out := c.TransactGetItemsRequest(input) +func (c *DynamoDB) RestoreTableToPointInTimeWithContext(ctx aws.Context, input *RestoreTableToPointInTimeInput, opts ...request.Option) (*RestoreTableToPointInTimeOutput, error) { + req, out := c.RestoreTableToPointInTimeRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opTransactWriteItems = "TransactWriteItems" +const opScan = "Scan" -// TransactWriteItemsRequest generates a "aws/request.Request" representing the -// client's request for the TransactWriteItems operation. The "output" return +// ScanRequest generates a "aws/request.Request" representing the +// client's request for the Scan operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See TransactWriteItems for more information on using the TransactWriteItems +// See Scan for more information on using the Scan // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the TransactWriteItemsRequest method. -// req, resp := client.TransactWriteItemsRequest(params) +// // Example sending a request using the ScanRequest method. 
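//
// For illustration only, a sketch of the point-in-time restore described above;
// the table names are hypothetical, and UseLatestRestorableTime is set rather
// than an explicit RestoreDateTime within the restorable window.
//
//    svc := dynamodb.New(session.Must(session.NewSession()))
//    _, err := svc.RestoreTableToPointInTime(&dynamodb.RestoreTableToPointInTimeInput{
//        SourceTableName:         aws.String("Music"),
//        TargetTableName:         aws.String("MusicYesterday"),
//        UseLatestRestorableTime: aws.Bool(true),
//    })
//    if err != nil {
//        // InvalidRestoreTimeException signals a RestoreDateTime outside the
//        // EarliestRestorableDateTime..LatestRestorableDateTime window.
//        log.Fatal(err)
//    }
//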
+// req, resp := client.ScanRequest(params)
//
// err := req.Send()
// if err == nil { // resp is now filled
// fmt.Println(resp)
// }
//
-// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems
-func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (req *request.Request, output *TransactWriteItemsOutput) {
+// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan
+func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *ScanOutput) {
	op := &request.Operation{
-		Name:       opTransactWriteItems,
+		Name:       opScan,
		HTTPMethod: "POST",
		HTTPPath:   "/",
+		Paginator: &request.Paginator{
+			InputTokens:     []string{"ExclusiveStartKey"},
+			OutputTokens:    []string{"LastEvaluatedKey"},
+			LimitToken:      "Limit",
+			TruncationToken: "",
+		},
	}

	if input == nil {
-		input = &TransactWriteItemsInput{}
+		input = &ScanInput{}
	}

-	output = &TransactWriteItemsOutput{}
+	output = &ScanOutput{}
	req = c.newRequest(op, input, output)
	// if custom endpoint for the request is set to a non empty string,
	// we skip the endpoint discovery workflow.
@@ -4947,169 +5139,45 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re
	return
}

-// TransactWriteItems API operation for Amazon DynamoDB.
+// Scan API operation for Amazon DynamoDB.
//
-// TransactWriteItems is a synchronous write operation that groups up to 25
-// action requests. These actions can target items in different tables, but
-// not in different AWS accounts or Regions, and no two actions can target the
-// same item. For example, you cannot both ConditionCheck and Update the same
-// item. The aggregate size of the items in the transaction cannot exceed 4
-// MB.
+// The Scan operation returns one or more items and item attributes by accessing
+// every item in a table or a secondary index. To have DynamoDB return fewer
+// items, you can provide a FilterExpression operation.
//
-// The actions are completed atomically so that either all of them succeed,
-// or all of them fail. They are defined by the following objects:
+// If the total number of scanned items exceeds the maximum dataset size limit
+// of 1 MB, the scan stops and results are returned to the user as a LastEvaluatedKey
+// value to continue the scan in a subsequent operation. The results also include
+// the number of items exceeding the limit. A scan can result in no table data
+// meeting the filter criteria.
//
-// * Put — Initiates a PutItem operation to write a new item. This structure
-// specifies the primary key of the item to be written, the name of the table
-// to write it in, an optional condition expression that must be satisfied
-// for the write to succeed, a list of the item's attributes, and a field
-// indicating whether to retrieve the item's attributes if the condition
-// is not met.
+// A single Scan operation reads up to the maximum number of items set (if using
+// the Limit parameter) or a maximum of 1 MB of data and then applies any filtering
+// to the results using FilterExpression. If LastEvaluatedKey is present in
+// the response, you need to paginate the result set. For more information,
+// see Paginating the Results (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination)
+// in the Amazon DynamoDB Developer Guide.
//
-// * Update — Initiates an UpdateItem operation to update an existing item.
-// This structure specifies the primary key of the item to be updated, the -// name of the table where it resides, an optional condition expression that -// must be satisfied for the update to succeed, an expression that defines -// one or more attributes to be updated, and a field indicating whether to -// retrieve the item's attributes if the condition is not met. +// Scan operations proceed sequentially; however, for faster performance on +// a large table or secondary index, applications can request a parallel Scan +// operation by providing the Segment and TotalSegments parameters. For more +// information, see Parallel Scan (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan) +// in the Amazon DynamoDB Developer Guide. // -// * Delete — Initiates a DeleteItem operation to delete an existing item. -// This structure specifies the primary key of the item to be deleted, the -// name of the table where it resides, an optional condition expression that -// must be satisfied for the deletion to succeed, and a field indicating -// whether to retrieve the item's attributes if the condition is not met. -// -// * ConditionCheck — Applies a condition to an item that is not being -// modified by the transaction. This structure specifies the primary key -// of the item to be checked, the name of the table where it resides, a condition -// expression that must be satisfied for the transaction to succeed, and -// a field indicating whether to retrieve the item's attributes if the condition -// is not met. -// -// DynamoDB rejects the entire TransactWriteItems request if any of the following -// is true: -// -// * A condition in one of the condition expressions is not met. -// -// * An ongoing operation is in the process of updating the same item. -// -// * There is insufficient provisioned capacity for the transaction to be -// completed. -// -// * An item size becomes too large (bigger than 400 KB), a local secondary -// index (LSI) becomes too large, or a similar validation error occurs because -// of changes made by the transaction. -// -// * The aggregate size of the items in the transaction exceeds 4 MB. -// -// * There is a user error, such as an invalid data format. +// Scan uses eventually consistent reads when accessing the data in a table; +// therefore, the result set might not include the changes to data in the table +// immediately before the operation began. If you need a consistent copy of +// the data, as of the time that the Scan begins, you can set the ConsistentRead +// parameter to true. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation TransactWriteItems for usage and error information. +// API operation Scan for usage and error information. // // Returned Error Types: -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// * TransactionCanceledException -// The entire transaction request was canceled. -// -// DynamoDB cancels a TransactWriteItems request under the following circumstances: -// -// * A condition in one of the condition expressions is not met. -// -// * A table in the TransactWriteItems request is in a different account -// or region. 
-// -// * More than one action in the TransactWriteItems operation targets the -// same item. -// -// * There is insufficient provisioned capacity for the transaction to be -// completed. -// -// * An item size becomes too large (larger than 400 KB), or a local secondary -// index (LSI) becomes too large, or a similar validation error occurs because -// of changes made by the transaction. -// -// * There is a user error, such as an invalid data format. -// -// DynamoDB cancels a TransactGetItems request under the following circumstances: -// -// * There is an ongoing TransactGetItems operation that conflicts with a -// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. -// In this case the TransactGetItems operation fails with a TransactionCanceledException. -// -// * A table in the TransactGetItems request is in a different account or -// region. -// -// * There is insufficient provisioned capacity for the transaction to be -// completed. -// -// * There is a user error, such as an invalid data format. -// -// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons -// property. This property is not set for other languages. Transaction cancellation -// reasons are ordered in the order of requested items, if an item has no error -// it will have NONE code and Null message. -// -// Cancellation reason codes and possible error messages: -// -// * No Errors: Code: NONE Message: null -// -// * Conditional Check Failed: Code: ConditionalCheckFailed Message: The -// conditional request failed. -// -// * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded -// Message: Collection size exceeded. -// -// * Transaction Conflict: Code: TransactionConflict Message: Transaction -// is ongoing for the item. -// -// * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded -// Messages: The level of configured provisioned throughput for the table -// was exceeded. Consider increasing your provisioning level with the UpdateTable -// API. This Message is received when provisioned throughput is exceeded -// is on a provisioned DynamoDB table. The level of configured provisioned -// throughput for one or more global secondary indexes of the table was exceeded. -// Consider increasing your provisioning level for the under-provisioned -// global secondary indexes with the UpdateTable API. This message is returned -// when provisioned throughput is exceeded is on a provisioned GSI. -// -// * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds -// the current capacity of your table or index. DynamoDB is automatically -// scaling your table or index so please try again shortly. If exceptions -// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html. -// This message is returned when writes get throttled on an On-Demand table -// as DynamoDB is automatically scaling the table. Throughput exceeds the -// current capacity for one or more global secondary indexes. DynamoDB is -// automatically scaling your index so please try again shortly. This message -// is returned when when writes get throttled on an On-Demand GSI as DynamoDB -// is automatically scaling the GSI. -// -// * Validation Error: Code: ValidationError Messages: One or more parameter -// values were invalid. The update expression attempted to update the secondary -// index key beyond allowed size limits. 
The update expression attempted -// to update the secondary index key to unsupported type. An operand in the -// update expression has an incorrect data type. Item size to update has -// exceeded the maximum allowed size. Number overflow. Attempting to store -// a number with magnitude larger than supported range. Type mismatch for -// attribute to update. Nesting Levels have exceeded supported limits. The -// document path provided in the update expression is invalid for update. -// The provided expression refers to an attribute that does not exist in -// the item. -// -// * TransactionInProgressException -// The transaction with the given request token is already in progress. -// -// * IdempotentParameterMismatchException -// DynamoDB rejected the request because you retried a request with a different -// payload but with an idempotent token that was already used. -// // * ProvisionedThroughputExceededException // Your request rate is too high. The AWS SDKs for DynamoDB automatically retry // requests that receive this exception. Your request is eventually successful, @@ -5118,6 +5186,10 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) // in the Amazon DynamoDB Developer Guide. // +// * ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// // * RequestLimitExceeded // Throughput exceeds the current throughput quota for your account. Please // contact AWS Support at AWS Support (https://aws.amazon.com/support) to request @@ -5126,66 +5198,118 @@ func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (re // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems -func (c *DynamoDB) TransactWriteItems(input *TransactWriteItemsInput) (*TransactWriteItemsOutput, error) { - req, out := c.TransactWriteItemsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/Scan +func (c *DynamoDB) Scan(input *ScanInput) (*ScanOutput, error) { + req, out := c.ScanRequest(input) return out, req.Send() } -// TransactWriteItemsWithContext is the same as TransactWriteItems with the addition of +// ScanWithContext is the same as Scan with the addition of // the ability to pass a context and additional request options. // -// See TransactWriteItems for details on how to use this API operation. +// See Scan for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) TransactWriteItemsWithContext(ctx aws.Context, input *TransactWriteItemsInput, opts ...request.Option) (*TransactWriteItemsOutput, error) { - req, out := c.TransactWriteItemsRequest(input) +func (c *DynamoDB) ScanWithContext(ctx aws.Context, input *ScanInput, opts ...request.Option) (*ScanOutput, error) { + req, out := c.ScanRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) 
return out, req.Send() } -const opUntagResource = "UntagResource" +// ScanPages iterates over the pages of a Scan operation, +// calling the "fn" function with the response data for each page. To stop +// iterating, return false from the fn function. +// +// See Scan method for more information on how to use this operation. +// +// Note: This operation can generate multiple requests to a service. +// +// // Example iterating over at most 3 pages of a Scan operation. +// pageNum := 0 +// err := client.ScanPages(params, +// func(page *dynamodb.ScanOutput, lastPage bool) bool { +// pageNum++ +// fmt.Println(page) +// return pageNum <= 3 +// }) +// +func (c *DynamoDB) ScanPages(input *ScanInput, fn func(*ScanOutput, bool) bool) error { + return c.ScanPagesWithContext(aws.BackgroundContext(), input, fn) +} -// UntagResourceRequest generates a "aws/request.Request" representing the -// client's request for the UntagResource operation. The "output" return +// ScanPagesWithContext same as ScanPages except +// it takes a Context and allows setting request options on the pages. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) ScanPagesWithContext(ctx aws.Context, input *ScanInput, fn func(*ScanOutput, bool) bool, opts ...request.Option) error { + p := request.Pagination{ + NewRequest: func() (*request.Request, error) { + var inCpy *ScanInput + if input != nil { + tmp := *input + inCpy = &tmp + } + req, _ := c.ScanRequest(inCpy) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return req, nil + }, + } + + for p.Next() { + if !fn(p.Page().(*ScanOutput), !p.HasNextPage()) { + break + } + } + + return p.Err() +} + +const opTagResource = "TagResource" + +// TagResourceRequest generates a "aws/request.Request" representing the +// client's request for the TagResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UntagResource for more information on using the UntagResource +// See TagResource for more information on using the TagResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UntagResourceRequest method. -// req, resp := client.UntagResourceRequest(params) +// // Example sending a request using the TagResourceRequest method. 
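//
// For illustration only, a sketch of the parallel Scan described above: two
// hypothetical workers each page through their own segment with ScanPages.
// The table name and segment count are placeholders; the sync, aws, session,
// and dynamodb imports are assumed.
//
//    svc := dynamodb.New(session.Must(session.NewSession()))
//    totalSegments := int64(2)
//    var wg sync.WaitGroup
//    for seg := int64(0); seg < totalSegments; seg++ {
//        wg.Add(1)
//        go func(seg int64) {
//            defer wg.Done()
//            input := &dynamodb.ScanInput{
//                TableName:     aws.String("Music"),
//                Segment:       aws.Int64(seg),
//                TotalSegments: aws.Int64(totalSegments),
//            }
//            _ = svc.ScanPages(input, func(page *dynamodb.ScanOutput, lastPage bool) bool {
//                fmt.Println("segment", seg, "items:", len(page.Items))
//                return true // scan this segment to exhaustion
//            })
//        }(seg)
//    }
//    wg.Wait()
//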
+// req, resp := client.TagResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource -func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource +func (c *DynamoDB) TagResourceRequest(input *TagResourceInput) (req *request.Request, output *TagResourceOutput) { op := &request.Operation{ - Name: opUntagResource, + Name: opTagResource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UntagResourceInput{} + input = &TagResourceInput{} } - output = &UntagResourceOutput{} + output = &TagResourceOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) // if custom endpoint for the request is set to a non empty string, @@ -5216,10 +5340,12 @@ func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request return } -// UntagResource API operation for Amazon DynamoDB. +// TagResource API operation for Amazon DynamoDB. // -// Removes the association of tags from an Amazon DynamoDB resource. You can -// call UntagResource up to five times per second, per account. +// Associate a set of tags with an Amazon DynamoDB resource. You can then activate +// these user-defined tags so that they appear on the Billing and Cost Management +// console for cost allocation tracking. You can call TagResource up to five +// times per second, per account. // // For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) // in the Amazon DynamoDB Developer Guide. @@ -5229,7 +5355,7 @@ func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation UntagResource for usage and error information. +// API operation TagResource for usage and error information. // // Returned Error Types: // * LimitExceededException @@ -5258,66 +5384,66 @@ func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request // attempted to recreate an existing table, or tried to delete a table currently // in the CREATING state. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource -func (c *DynamoDB) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TagResource +func (c *DynamoDB) TagResource(input *TagResourceInput) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) return out, req.Send() } -// UntagResourceWithContext is the same as UntagResource with the addition of +// TagResourceWithContext is the same as TagResource with the addition of // the ability to pass a context and additional request options. // -// See UntagResource for details on how to use this API operation. +// See TagResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. 
-func (c *DynamoDB) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { - req, out := c.UntagResourceRequest(input) +func (c *DynamoDB) TagResourceWithContext(ctx aws.Context, input *TagResourceInput, opts ...request.Option) (*TagResourceOutput, error) { + req, out := c.TagResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateContinuousBackups = "UpdateContinuousBackups" +const opTransactGetItems = "TransactGetItems" -// UpdateContinuousBackupsRequest generates a "aws/request.Request" representing the -// client's request for the UpdateContinuousBackups operation. The "output" return +// TransactGetItemsRequest generates a "aws/request.Request" representing the +// client's request for the TransactGetItems operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateContinuousBackups for more information on using the UpdateContinuousBackups +// See TransactGetItems for more information on using the TransactGetItems // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateContinuousBackupsRequest method. -// req, resp := client.UpdateContinuousBackupsRequest(params) +// // Example sending a request using the TransactGetItemsRequest method. +// req, resp := client.TransactGetItemsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups -func (c *DynamoDB) UpdateContinuousBackupsRequest(input *UpdateContinuousBackupsInput) (req *request.Request, output *UpdateContinuousBackupsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems +func (c *DynamoDB) TransactGetItemsRequest(input *TransactGetItemsInput) (req *request.Request, output *TransactGetItemsOutput) { op := &request.Operation{ - Name: opUpdateContinuousBackups, + Name: opTransactGetItems, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateContinuousBackupsInput{} + input = &TransactGetItemsInput{} } - output = &UpdateContinuousBackupsOutput{} + output = &TransactGetItemsOutput{} req = c.newRequest(op, input, output) // if custom endpoint for the request is set to a non empty string, // we skip the endpoint discovery workflow. @@ -5347,181 +5473,204 @@ func (c *DynamoDB) UpdateContinuousBackupsRequest(input *UpdateContinuousBackups return } -// UpdateContinuousBackups API operation for Amazon DynamoDB. +// TransactGetItems API operation for Amazon DynamoDB. // -// UpdateContinuousBackups enables or disables point in time recovery for the -// specified table. A successful UpdateContinuousBackups call returns the current -// ContinuousBackupsDescription. Continuous backups are ENABLED on all tables -// at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus -// will be set to ENABLED. 
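//
// Returning to TagResource, defined just above: for illustration only, tagging
// a table with a single hypothetical cost-allocation tag (placeholder ARN).
//
//    svc := dynamodb.New(session.Must(session.NewSession()))
//    _, err := svc.TagResource(&dynamodb.TagResourceInput{
//        ResourceArn: aws.String("arn:aws:dynamodb:us-east-1:111122223333:table/Music"),
//        Tags: []*dynamodb.Tag{
//            {Key: aws.String("team"), Value: aws.String("platform")},
//        },
//    })
//    if err != nil {
//        log.Fatal(err) // subject to the five-calls-per-second account limit above
//    }
//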
+// TransactGetItems is a synchronous operation that atomically retrieves multiple +// items from one or more tables (but not from indexes) in a single account +// and Region. A TransactGetItems call can contain up to 25 TransactGetItem +// objects, each of which contains a Get structure that specifies an item to +// retrieve from a table in the account and Region. A call to TransactGetItems +// cannot retrieve items from tables in more than one AWS account or Region. +// The aggregate size of the items in the transaction cannot exceed 4 MB. // -// Once continuous backups and point in time recovery are enabled, you can restore -// to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. +// DynamoDB rejects the entire TransactGetItems request if any of the following +// is true: // -// LatestRestorableDateTime is typically 5 minutes before the current time. -// You can restore your table to any point in time during the last 35 days. +// * A conflicting operation is in the process of updating an item to be +// read. +// +// * There is insufficient provisioned capacity for the transaction to be +// completed. +// +// * There is a user error, such as an invalid data format. +// +// * The aggregate size of the items in the transaction cannot exceed 4 MB. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateContinuousBackups for usage and error information. +// API operation TransactGetItems for usage and error information. // // Returned Error Types: -// * TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account. +// * ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. // -// * ContinuousBackupsUnavailableException -// Backups have not yet been enabled for this table. +// * TransactionCanceledException +// The entire transaction request was canceled. // -// * InternalServerError -// An error occurred on the server side. +// DynamoDB cancels a TransactWriteItems request under the following circumstances: // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups -func (c *DynamoDB) UpdateContinuousBackups(input *UpdateContinuousBackupsInput) (*UpdateContinuousBackupsOutput, error) { - req, out := c.UpdateContinuousBackupsRequest(input) - return out, req.Send() -} - -// UpdateContinuousBackupsWithContext is the same as UpdateContinuousBackups with the addition of -// the ability to pass a context and additional request options. +// * A condition in one of the condition expressions is not met. // -// See UpdateContinuousBackups for details on how to use this API operation. +// * A table in the TransactWriteItems request is in a different account +// or region. // -// The context must be non-nil and will be used for request cancellation. If -// the context is nil a panic will occur. In the future the SDK may create -// sub-contexts for http.Requests. See https://golang.org/pkg/context/ -// for more information on using Contexts. 
-func (c *DynamoDB) UpdateContinuousBackupsWithContext(ctx aws.Context, input *UpdateContinuousBackupsInput, opts ...request.Option) (*UpdateContinuousBackupsOutput, error) { - req, out := c.UpdateContinuousBackupsRequest(input) - req.SetContext(ctx) - req.ApplyOptions(opts...) - return out, req.Send() -} - -const opUpdateContributorInsights = "UpdateContributorInsights" - -// UpdateContributorInsightsRequest generates a "aws/request.Request" representing the -// client's request for the UpdateContributorInsights operation. The "output" return -// value will be populated with the request's response once the request completes -// successfully. +// * More than one action in the TransactWriteItems operation targets the +// same item. // -// Use "Send" method on the returned Request to send the API call to the service. -// the "output" return value is not valid until after Send returns without error. +// * There is insufficient provisioned capacity for the transaction to be +// completed. // -// See UpdateContributorInsights for more information on using the UpdateContributorInsights -// API call, and error handling. +// * An item size becomes too large (larger than 400 KB), or a local secondary +// index (LSI) becomes too large, or a similar validation error occurs because +// of changes made by the transaction. // -// This method is useful when you want to inject custom logic or configuration -// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// * There is a user error, such as an invalid data format. // +// DynamoDB cancels a TransactGetItems request under the following circumstances: // -// // Example sending a request using the UpdateContributorInsightsRequest method. -// req, resp := client.UpdateContributorInsightsRequest(params) +// * There is an ongoing TransactGetItems operation that conflicts with a +// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request. +// In this case the TransactGetItems operation fails with a TransactionCanceledException. // -// err := req.Send() -// if err == nil { // resp is now filled -// fmt.Println(resp) -// } +// * A table in the TransactGetItems request is in a different account or +// region. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContributorInsights -func (c *DynamoDB) UpdateContributorInsightsRequest(input *UpdateContributorInsightsInput) (req *request.Request, output *UpdateContributorInsightsOutput) { - op := &request.Operation{ - Name: opUpdateContributorInsights, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &UpdateContributorInsightsInput{} - } - - output = &UpdateContributorInsightsOutput{} - req = c.newRequest(op, input, output) - return -} - -// UpdateContributorInsights API operation for Amazon DynamoDB. +// * There is insufficient provisioned capacity for the transaction to be +// completed. // -// Updates the status for contributor insights for a specific table or index. +// * There is a user error, such as an invalid data format. // -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. +// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons +// property. This property is not set for other languages. Transaction cancellation +// reasons are ordered in the order of requested items, if an item has no error +// it will have NONE code and Null message. 
//
-// See the AWS API reference guide for Amazon DynamoDB's
-// API operation UpdateContributorInsights for usage and error information.
+// Cancellation reason codes and possible error messages:
//
-// Returned Error Types:
-// * ResourceNotFoundException
-// The operation tried to access a nonexistent table or index. The resource
-// might not be specified correctly, or its status might not be ACTIVE.
+// * No Errors: Code: NONE Message: null
+//
+// * Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+// conditional request failed.
+//
+// * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+// Message: Collection size exceeded.
+//
+// * Transaction Conflict: Code: TransactionConflict Message: Transaction
+// is ongoing for the item.
+//
+// * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+// Messages: The level of configured provisioned throughput for the table
+// was exceeded. Consider increasing your provisioning level with the UpdateTable
+// API. This message is returned when provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. This message is returned
+// when provisioned throughput is exceeded on a provisioned GSI.
+//
+// * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table
+// as DynamoDB is automatically scaling the table. Throughput exceeds the
+// current capacity for one or more global secondary indexes. DynamoDB is
+// automatically scaling your index so please try again shortly. This message
+// is returned when writes get throttled on an On-Demand GSI as DynamoDB
+// is automatically scaling the GSI.
+//
+// * Validation Error: Code: ValidationError Messages: One or more parameter
+// values were invalid. The update expression attempted to update the secondary
+// index key beyond allowed size limits. The update expression attempted
+// to update the secondary index key to unsupported type. An operand in the
+// update expression has an incorrect data type. Item size to update has
+// exceeded the maximum allowed size. Number overflow. Attempting to store
+// a number with magnitude larger than supported range. Type mismatch for
+// attribute to update. Nesting Levels have exceeded supported limits. The
+// document path provided in the update expression is invalid for update.
+// The provided expression refers to an attribute that does not exist in
+// the item.
+//
+// * ProvisionedThroughputExceededException
+// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry
+// requests that receive this exception. Your request is eventually successful,
+// unless your retry queue is too large to finish. Reduce the frequency of requests
+// and use exponential backoff.
For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// * RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a quota increase. // // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContributorInsights -func (c *DynamoDB) UpdateContributorInsights(input *UpdateContributorInsightsInput) (*UpdateContributorInsightsOutput, error) { - req, out := c.UpdateContributorInsightsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactGetItems +func (c *DynamoDB) TransactGetItems(input *TransactGetItemsInput) (*TransactGetItemsOutput, error) { + req, out := c.TransactGetItemsRequest(input) return out, req.Send() } -// UpdateContributorInsightsWithContext is the same as UpdateContributorInsights with the addition of +// TransactGetItemsWithContext is the same as TransactGetItems with the addition of // the ability to pass a context and additional request options. // -// See UpdateContributorInsights for details on how to use this API operation. +// See TransactGetItems for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) UpdateContributorInsightsWithContext(ctx aws.Context, input *UpdateContributorInsightsInput, opts ...request.Option) (*UpdateContributorInsightsOutput, error) { - req, out := c.UpdateContributorInsightsRequest(input) +func (c *DynamoDB) TransactGetItemsWithContext(ctx aws.Context, input *TransactGetItemsInput, opts ...request.Option) (*TransactGetItemsOutput, error) { + req, out := c.TransactGetItemsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateGlobalTable = "UpdateGlobalTable" +const opTransactWriteItems = "TransactWriteItems" -// UpdateGlobalTableRequest generates a "aws/request.Request" representing the -// client's request for the UpdateGlobalTable operation. The "output" return +// TransactWriteItemsRequest generates a "aws/request.Request" representing the +// client's request for the TransactWriteItems operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateGlobalTable for more information on using the UpdateGlobalTable +// See TransactWriteItems for more information on using the TransactWriteItems // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateGlobalTableRequest method. -// req, resp := client.UpdateGlobalTableRequest(params) +// // Example sending a request using the TransactWriteItemsRequest method. 
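//
// For illustration only, a sketch of an atomic two-item read with
// TransactGetItems as documented above; table and key names are hypothetical.
// The whole request fails with TransactionCanceledException if any Get
// conflicts with a concurrent write.
//
//    svc := dynamodb.New(session.Must(session.NewSession()))
//    out, err := svc.TransactGetItems(&dynamodb.TransactGetItemsInput{
//        TransactItems: []*dynamodb.TransactGetItem{
//            {Get: &dynamodb.Get{
//                TableName: aws.String("Accounts"),
//                Key:       map[string]*dynamodb.AttributeValue{"Id": {S: aws.String("alice")}},
//            }},
//            {Get: &dynamodb.Get{
//                TableName: aws.String("Accounts"),
//                Key:       map[string]*dynamodb.AttributeValue{"Id": {S: aws.String("bob")}},
//            }},
//        },
//    })
//    if err == nil {
//        fmt.Println(len(out.Responses)) // one ItemResponse per TransactGetItem
//    }
//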
+// req, resp := client.TransactWriteItemsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable -func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req *request.Request, output *UpdateGlobalTableOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems +func (c *DynamoDB) TransactWriteItemsRequest(input *TransactWriteItemsInput) (req *request.Request, output *TransactWriteItemsOutput) { op := &request.Operation{ - Name: opUpdateGlobalTable, + Name: opTransactWriteItems, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateGlobalTableInput{} + input = &TransactWriteItemsInput{} } - output = &UpdateGlobalTableOutput{} + output = &TransactWriteItemsOutput{} req = c.newRequest(op, input, output) // if custom endpoint for the request is set to a non empty string, // we skip the endpoint discovery workflow. @@ -5551,114 +5700,247 @@ func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req return } -// UpdateGlobalTable API operation for Amazon DynamoDB. -// -// Adds or removes replicas in the specified global table. The global table -// must already exist to be able to use this operation. Any replica to be added -// must be empty, have the same name as the global table, have the same key -// schema, have DynamoDB Streams enabled, and have the same provisioned and -// maximum write capacity units. +// TransactWriteItems API operation for Amazon DynamoDB. // -// Although you can use UpdateGlobalTable to add replicas and remove replicas -// in a single request, for simplicity we recommend that you issue separate -// requests for adding or removing replicas. +// TransactWriteItems is a synchronous write operation that groups up to 25 +// action requests. These actions can target items in different tables, but +// not in different AWS accounts or Regions, and no two actions can target the +// same item. For example, you cannot both ConditionCheck and Update the same +// item. The aggregate size of the items in the transaction cannot exceed 4 +// MB. // -// If global secondary indexes are specified, then the following conditions -// must also be met: +// The actions are completed atomically so that either all of them succeed, +// or all of them fail. They are defined by the following objects: // -// * The global secondary indexes must have the same name. +// * Put — Initiates a PutItem operation to write a new item. This structure +// specifies the primary key of the item to be written, the name of the table +// to write it in, an optional condition expression that must be satisfied +// for the write to succeed, a list of the item's attributes, and a field +// indicating whether to retrieve the item's attributes if the condition +// is not met. // -// * The global secondary indexes must have the same hash key and sort key -// (if present). +// * Update — Initiates an UpdateItem operation to update an existing item. +// This structure specifies the primary key of the item to be updated, the +// name of the table where it resides, an optional condition expression that +// must be satisfied for the update to succeed, an expression that defines +// one or more attributes to be updated, and a field indicating whether to +// retrieve the item's attributes if the condition is not met. 
// -// * The global secondary indexes must have the same provisioned and maximum -// write capacity units. +// * Delete — Initiates a DeleteItem operation to delete an existing item. +// This structure specifies the primary key of the item to be deleted, the +// name of the table where it resides, an optional condition expression that +// must be satisfied for the deletion to succeed, and a field indicating +// whether to retrieve the item's attributes if the condition is not met. +// +// * ConditionCheck — Applies a condition to an item that is not being +// modified by the transaction. This structure specifies the primary key +// of the item to be checked, the name of the table where it resides, a condition +// expression that must be satisfied for the transaction to succeed, and +// a field indicating whether to retrieve the item's attributes if the condition +// is not met. +// +// DynamoDB rejects the entire TransactWriteItems request if any of the following +// is true: +// +// * A condition in one of the condition expressions is not met. +// +// * An ongoing operation is in the process of updating the same item. +// +// * There is insufficient provisioned capacity for the transaction to be +// completed. +// +// * An item size becomes too large (bigger than 400 KB), a local secondary +// index (LSI) becomes too large, or a similar validation error occurs because +// of changes made by the transaction. +// +// * The aggregate size of the items in the transaction exceeds 4 MB. +// +// * There is a user error, such as an invalid data format. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateGlobalTable for usage and error information. +// API operation TransactWriteItems for usage and error information. // // Returned Error Types: -// * InternalServerError -// An error occurred on the server side. +// * ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. // -// * GlobalTableNotFoundException -// The specified global table does not exist. +// * TransactionCanceledException +// The entire transaction request was canceled. // -// * ReplicaAlreadyExistsException -// The specified replica is already part of the global table. +// DynamoDB cancels a TransactWriteItems request under the following circumstances: // -// * ReplicaNotFoundException -// The specified replica is no longer part of the global table. +// * A condition in one of the condition expressions is not met. // -// * TableNotFoundException -// A source table with the name TableName does not currently exist within the -// subscriber's account. +// * A table in the TransactWriteItems request is in a different account +// or region. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable -func (c *DynamoDB) UpdateGlobalTable(input *UpdateGlobalTableInput) (*UpdateGlobalTableOutput, error) { - req, out := c.UpdateGlobalTableRequest(input) +// * More than one action in the TransactWriteItems operation targets the +// same item. +// +// * There is insufficient provisioned capacity for the transaction to be +// completed. 
+//
+// * An item size becomes too large (larger than 400 KB), or a local secondary
+// index (LSI) becomes too large, or a similar validation error occurs because
+// of changes made by the transaction.
+//
+// * There is a user error, such as an invalid data format.
+//
+// DynamoDB cancels a TransactGetItems request under the following circumstances:
+//
+// * There is an ongoing TransactGetItems operation that conflicts with a
+// concurrent PutItem, UpdateItem, DeleteItem or TransactWriteItems request.
+// In this case the TransactGetItems operation fails with a TransactionCanceledException.
+//
+// * A table in the TransactGetItems request is in a different account or
+// region.
+//
+// * There is insufficient provisioned capacity for the transaction to be
+// completed.
+//
+// * There is a user error, such as an invalid data format.
+//
+// If using Java, DynamoDB lists the cancellation reasons on the CancellationReasons
+// property. This property is not set for other languages. Transaction cancellation
+// reasons are ordered in the order of the requested items; if an item has no
+// error, it will have the code NONE and a null message.
+//
+// Cancellation reason codes and possible error messages:
+//
+// * No Errors: Code: NONE Message: null
+//
+// * Conditional Check Failed: Code: ConditionalCheckFailed Message: The
+// conditional request failed.
+//
+// * Item Collection Size Limit Exceeded: Code: ItemCollectionSizeLimitExceeded
+// Message: Collection size exceeded.
+//
+// * Transaction Conflict: Code: TransactionConflict Message: Transaction
+// is ongoing for the item.
+//
+// * Provisioned Throughput Exceeded: Code: ProvisionedThroughputExceeded
+// Messages: The level of configured provisioned throughput for the table
+// was exceeded. Consider increasing your provisioning level with the UpdateTable
+// API. This message is returned when provisioned throughput is exceeded
+// on a provisioned DynamoDB table. The level of configured provisioned
+// throughput for one or more global secondary indexes of the table was exceeded.
+// Consider increasing your provisioning level for the under-provisioned
+// global secondary indexes with the UpdateTable API. This message is returned
+// when provisioned throughput is exceeded on a provisioned GSI.
+//
+// * Throttling Error: Code: ThrottlingError Messages: Throughput exceeds
+// the current capacity of your table or index. DynamoDB is automatically
+// scaling your table or index so please try again shortly. If exceptions
+// persist, check if you have a hot key: https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/bp-partition-key-design.html.
+// This message is returned when writes get throttled on an On-Demand table
+// as DynamoDB is automatically scaling the table. Throughput exceeds the
+// current capacity for one or more global secondary indexes. DynamoDB is
+// automatically scaling your index so please try again shortly. This message
+// is returned when writes get throttled on an On-Demand GSI as DynamoDB
+// is automatically scaling the GSI.
+//
+// * Validation Error: Code: ValidationError Messages: One or more parameter
+// values were invalid. The update expression attempted to update the secondary
+// index key beyond allowed size limits. The update expression attempted
+// to update the secondary index key to an unsupported type. An operand in the
+// update expression has an incorrect data type. Item size to update has
+// exceeded the maximum allowed size. Number overflow.
Attempting to store +// a number with magnitude larger than supported range. Type mismatch for +// attribute to update. Nesting Levels have exceeded supported limits. The +// document path provided in the update expression is invalid for update. +// The provided expression refers to an attribute that does not exist in +// the item. +// +// * TransactionInProgressException +// The transaction with the given request token is already in progress. +// +// * IdempotentParameterMismatchException +// DynamoDB rejected the request because you retried a request with a different +// payload but with an idempotent token that was already used. +// +// * ProvisionedThroughputExceededException +// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry +// requests that receive this exception. Your request is eventually successful, +// unless your retry queue is too large to finish. Reduce the frequency of requests +// and use exponential backoff. For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// * RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/TransactWriteItems +func (c *DynamoDB) TransactWriteItems(input *TransactWriteItemsInput) (*TransactWriteItemsOutput, error) { + req, out := c.TransactWriteItemsRequest(input) return out, req.Send() } -// UpdateGlobalTableWithContext is the same as UpdateGlobalTable with the addition of +// TransactWriteItemsWithContext is the same as TransactWriteItems with the addition of // the ability to pass a context and additional request options. // -// See UpdateGlobalTable for details on how to use this API operation. +// See TransactWriteItems for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) UpdateGlobalTableWithContext(ctx aws.Context, input *UpdateGlobalTableInput, opts ...request.Option) (*UpdateGlobalTableOutput, error) { - req, out := c.UpdateGlobalTableRequest(input) +func (c *DynamoDB) TransactWriteItemsWithContext(ctx aws.Context, input *TransactWriteItemsInput, opts ...request.Option) (*TransactWriteItemsOutput, error) { + req, out := c.TransactWriteItemsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateGlobalTableSettings = "UpdateGlobalTableSettings" +const opUntagResource = "UntagResource" -// UpdateGlobalTableSettingsRequest generates a "aws/request.Request" representing the -// client's request for the UpdateGlobalTableSettings operation. The "output" return +// UntagResourceRequest generates a "aws/request.Request" representing the +// client's request for the UntagResource operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. 
// the "output" return value is not valid until after Send returns without error. // -// See UpdateGlobalTableSettings for more information on using the UpdateGlobalTableSettings +// See UntagResource for more information on using the UntagResource // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateGlobalTableSettingsRequest method. -// req, resp := client.UpdateGlobalTableSettingsRequest(params) +// // Example sending a request using the UntagResourceRequest method. +// req, resp := client.UntagResourceRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings -func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSettingsInput) (req *request.Request, output *UpdateGlobalTableSettingsOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource +func (c *DynamoDB) UntagResourceRequest(input *UntagResourceInput) (req *request.Request, output *UntagResourceOutput) { op := &request.Operation{ - Name: opUpdateGlobalTableSettings, + Name: opUntagResource, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateGlobalTableSettingsInput{} + input = &UntagResourceInput{} } - output = &UpdateGlobalTableSettingsOutput{} + output = &UntagResourceOutput{} req = c.newRequest(op, input, output) + req.Handlers.Unmarshal.Swap(jsonrpc.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) // if custom endpoint for the request is set to a non empty string, // we skip the endpoint discovery workflow. if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { @@ -5687,27 +5969,22 @@ func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSett return } -// UpdateGlobalTableSettings API operation for Amazon DynamoDB. +// UntagResource API operation for Amazon DynamoDB. // -// Updates settings for a global table. +// Removes the association of tags from an Amazon DynamoDB resource. You can +// call UntagResource up to five times per second, per account. +// +// For an overview on tagging DynamoDB resources, see Tagging for DynamoDB (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Tagging.html) +// in the Amazon DynamoDB Developer Guide. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateGlobalTableSettings for usage and error information. +// API operation UntagResource for usage and error information. // // Returned Error Types: -// * GlobalTableNotFoundException -// The specified global table does not exist. -// -// * ReplicaNotFoundException -// The specified replica is no longer part of the global table. -// -// * IndexNotFoundException -// The operation tried to access a nonexistent index. -// // * LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. // @@ -5722,74 +5999,78 @@ func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSett // // There is a soft account quota of 256 tables. 
// +// * ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * InternalServerError +// An error occurred on the server side. +// // * ResourceInUseException // The operation conflicts with the resource's availability. For example, you // attempted to recreate an existing table, or tried to delete a table currently // in the CREATING state. // -// * InternalServerError -// An error occurred on the server side. -// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings -func (c *DynamoDB) UpdateGlobalTableSettings(input *UpdateGlobalTableSettingsInput) (*UpdateGlobalTableSettingsOutput, error) { - req, out := c.UpdateGlobalTableSettingsRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UntagResource +func (c *DynamoDB) UntagResource(input *UntagResourceInput) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) return out, req.Send() } -// UpdateGlobalTableSettingsWithContext is the same as UpdateGlobalTableSettings with the addition of +// UntagResourceWithContext is the same as UntagResource with the addition of // the ability to pass a context and additional request options. // -// See UpdateGlobalTableSettings for details on how to use this API operation. +// See UntagResource for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) UpdateGlobalTableSettingsWithContext(ctx aws.Context, input *UpdateGlobalTableSettingsInput, opts ...request.Option) (*UpdateGlobalTableSettingsOutput, error) { - req, out := c.UpdateGlobalTableSettingsRequest(input) +func (c *DynamoDB) UntagResourceWithContext(ctx aws.Context, input *UntagResourceInput, opts ...request.Option) (*UntagResourceOutput, error) { + req, out := c.UntagResourceRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateItem = "UpdateItem" +const opUpdateContinuousBackups = "UpdateContinuousBackups" -// UpdateItemRequest generates a "aws/request.Request" representing the -// client's request for the UpdateItem operation. The "output" return +// UpdateContinuousBackupsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateContinuousBackups operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateItem for more information on using the UpdateItem +// See UpdateContinuousBackups for more information on using the UpdateContinuousBackups // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateItemRequest method. -// req, resp := client.UpdateItemRequest(params) +// // Example sending a request using the UpdateContinuousBackupsRequest method. 
+// req, resp := client.UpdateContinuousBackupsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem -func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Request, output *UpdateItemOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups +func (c *DynamoDB) UpdateContinuousBackupsRequest(input *UpdateContinuousBackupsInput) (req *request.Request, output *UpdateContinuousBackupsOutput) { op := &request.Operation{ - Name: opUpdateItem, + Name: opUpdateContinuousBackups, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateItemInput{} + input = &UpdateContinuousBackupsInput{} } - output = &UpdateItemOutput{} + output = &UpdateContinuousBackupsOutput{} req = c.newRequest(op, input, output) // if custom endpoint for the request is set to a non empty string, // we skip the endpoint discovery workflow. @@ -5819,364 +6100,317 @@ func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Reque return } -// UpdateItem API operation for Amazon DynamoDB. +// UpdateContinuousBackups API operation for Amazon DynamoDB. // -// Edits an existing item's attributes, or adds a new item to the table if it -// does not already exist. You can put, delete, or add attribute values. You -// can also perform a conditional update on an existing item (insert a new attribute -// name-value pair if it doesn't exist, or replace an existing name-value pair -// if it has certain expected attribute values). +// UpdateContinuousBackups enables or disables point in time recovery for the +// specified table. A successful UpdateContinuousBackups call returns the current +// ContinuousBackupsDescription. Continuous backups are ENABLED on all tables +// at table creation. If point in time recovery is enabled, PointInTimeRecoveryStatus +// will be set to ENABLED. // -// You can also return the item's attribute values in the same UpdateItem operation -// using the ReturnValues parameter. +// Once continuous backups and point in time recovery are enabled, you can restore +// to any point in time within EarliestRestorableDateTime and LatestRestorableDateTime. +// +// LatestRestorableDateTime is typically 5 minutes before the current time. +// You can restore your table to any point in time during the last 35 days. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateItem for usage and error information. +// API operation UpdateContinuousBackups for usage and error information. // // Returned Error Types: -// * ConditionalCheckFailedException -// A condition specified in the operation could not be evaluated. -// -// * ProvisionedThroughputExceededException -// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry -// requests that receive this exception. Your request is eventually successful, -// unless your retry queue is too large to finish. Reduce the frequency of requests -// and use exponential backoff. For more information, go to Error Retries and -// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) -// in the Amazon DynamoDB Developer Guide. 
-// -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// * ItemCollectionSizeLimitExceededException -// An item collection is too large. This exception is only returned for tables -// that have one or more local secondary indexes. -// -// * TransactionConflictException -// Operation was rejected because there is an ongoing transaction for the item. +// * TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account. // -// * RequestLimitExceeded -// Throughput exceeds the current throughput quota for your account. Please -// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request -// a quota increase. +// * ContinuousBackupsUnavailableException +// Backups have not yet been enabled for this table. // // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem -func (c *DynamoDB) UpdateItem(input *UpdateItemInput) (*UpdateItemOutput, error) { - req, out := c.UpdateItemRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContinuousBackups +func (c *DynamoDB) UpdateContinuousBackups(input *UpdateContinuousBackupsInput) (*UpdateContinuousBackupsOutput, error) { + req, out := c.UpdateContinuousBackupsRequest(input) return out, req.Send() } -// UpdateItemWithContext is the same as UpdateItem with the addition of +// UpdateContinuousBackupsWithContext is the same as UpdateContinuousBackups with the addition of // the ability to pass a context and additional request options. // -// See UpdateItem for details on how to use this API operation. +// See UpdateContinuousBackups for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) UpdateItemWithContext(ctx aws.Context, input *UpdateItemInput, opts ...request.Option) (*UpdateItemOutput, error) { - req, out := c.UpdateItemRequest(input) +func (c *DynamoDB) UpdateContinuousBackupsWithContext(ctx aws.Context, input *UpdateContinuousBackupsInput, opts ...request.Option) (*UpdateContinuousBackupsOutput, error) { + req, out := c.UpdateContinuousBackupsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateTable = "UpdateTable" +const opUpdateContributorInsights = "UpdateContributorInsights" -// UpdateTableRequest generates a "aws/request.Request" representing the -// client's request for the UpdateTable operation. The "output" return +// UpdateContributorInsightsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateContributorInsights operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateTable for more information on using the UpdateTable +// See UpdateContributorInsights for more information on using the UpdateContributorInsights // API call, and error handling. 
// // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateTableRequest method. -// req, resp := client.UpdateTableRequest(params) +// // Example sending a request using the UpdateContributorInsightsRequest method. +// req, resp := client.UpdateContributorInsightsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable -func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContributorInsights +func (c *DynamoDB) UpdateContributorInsightsRequest(input *UpdateContributorInsightsInput) (req *request.Request, output *UpdateContributorInsightsOutput) { op := &request.Operation{ - Name: opUpdateTable, + Name: opUpdateContributorInsights, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateTableInput{} + input = &UpdateContributorInsightsInput{} } - output = &UpdateTableOutput{} + output = &UpdateContributorInsightsOutput{} req = c.newRequest(op, input, output) - // if custom endpoint for the request is set to a non empty string, - // we skip the endpoint discovery workflow. - if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { - if aws.BoolValue(req.Config.EnableEndpointDiscovery) { - de := discovererDescribeEndpoints{ - Required: false, - EndpointCache: c.endpointCache, - Params: map[string]*string{ - "op": aws.String(req.Operation.Name), - }, - Client: c, - } - - for k, v := range de.Params { - if v == nil { - delete(de.Params, k) - } - } - - req.Handlers.Build.PushFrontNamed(request.NamedHandler{ - Name: "crr.endpointdiscovery", - Fn: de.Handler, - }) - } - } return } -// UpdateTable API operation for Amazon DynamoDB. -// -// Modifies the provisioned throughput settings, global secondary indexes, or -// DynamoDB Streams settings for a given table. -// -// You can only perform one of the following operations at once: -// -// * Modify the provisioned throughput settings of the table. -// -// * Enable or disable DynamoDB Streams on the table. -// -// * Remove a global secondary index from the table. -// -// * Create a new global secondary index on the table. After the index begins -// backfilling, you can use UpdateTable to perform other operations. +// UpdateContributorInsights API operation for Amazon DynamoDB. // -// UpdateTable is an asynchronous operation; while it is executing, the table -// status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot -// issue another UpdateTable request. When the table returns to the ACTIVE state, -// the UpdateTable operation is complete. +// Updates the status for contributor insights for a specific table or index. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateTable for usage and error information. +// API operation UpdateContributorInsights for usage and error information. // // Returned Error Types: -// * ResourceInUseException -// The operation conflicts with the resource's availability. 
For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -// // * ResourceNotFoundException // The operation tried to access a nonexistent table or index. The resource // might not be specified correctly, or its status might not be ACTIVE. // -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. -// -// Up to 50 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. -// -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. -// -// There is a soft account quota of 256 tables. -// // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable -func (c *DynamoDB) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) { - req, out := c.UpdateTableRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateContributorInsights +func (c *DynamoDB) UpdateContributorInsights(input *UpdateContributorInsightsInput) (*UpdateContributorInsightsOutput, error) { + req, out := c.UpdateContributorInsightsRequest(input) return out, req.Send() } -// UpdateTableWithContext is the same as UpdateTable with the addition of +// UpdateContributorInsightsWithContext is the same as UpdateContributorInsights with the addition of // the ability to pass a context and additional request options. // -// See UpdateTable for details on how to use this API operation. +// See UpdateContributorInsights for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) UpdateTableWithContext(ctx aws.Context, input *UpdateTableInput, opts ...request.Option) (*UpdateTableOutput, error) { - req, out := c.UpdateTableRequest(input) +func (c *DynamoDB) UpdateContributorInsightsWithContext(ctx aws.Context, input *UpdateContributorInsightsInput, opts ...request.Option) (*UpdateContributorInsightsOutput, error) { + req, out := c.UpdateContributorInsightsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateTableReplicaAutoScaling = "UpdateTableReplicaAutoScaling" +const opUpdateGlobalTable = "UpdateGlobalTable" -// UpdateTableReplicaAutoScalingRequest generates a "aws/request.Request" representing the -// client's request for the UpdateTableReplicaAutoScaling operation. The "output" return +// UpdateGlobalTableRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGlobalTable operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. 
// -// See UpdateTableReplicaAutoScaling for more information on using the UpdateTableReplicaAutoScaling +// See UpdateGlobalTable for more information on using the UpdateGlobalTable // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateTableReplicaAutoScalingRequest method. -// req, resp := client.UpdateTableReplicaAutoScalingRequest(params) +// // Example sending a request using the UpdateGlobalTableRequest method. +// req, resp := client.UpdateGlobalTableRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableReplicaAutoScaling -func (c *DynamoDB) UpdateTableReplicaAutoScalingRequest(input *UpdateTableReplicaAutoScalingInput) (req *request.Request, output *UpdateTableReplicaAutoScalingOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable +func (c *DynamoDB) UpdateGlobalTableRequest(input *UpdateGlobalTableInput) (req *request.Request, output *UpdateGlobalTableOutput) { op := &request.Operation{ - Name: opUpdateTableReplicaAutoScaling, + Name: opUpdateGlobalTable, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateTableReplicaAutoScalingInput{} + input = &UpdateGlobalTableInput{} } - output = &UpdateTableReplicaAutoScalingOutput{} + output = &UpdateGlobalTableOutput{} req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } return } -// UpdateTableReplicaAutoScaling API operation for Amazon DynamoDB. +// UpdateGlobalTable API operation for Amazon DynamoDB. // -// Updates auto scaling settings on your global tables at once. +// Adds or removes replicas in the specified global table. The global table +// must already exist to be able to use this operation. Any replica to be added +// must be empty, have the same name as the global table, have the same key +// schema, have DynamoDB Streams enabled, and have the same provisioned and +// maximum write capacity units. // -// This operation only applies to Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) -// of global tables. +// Although you can use UpdateGlobalTable to add replicas and remove replicas +// in a single request, for simplicity we recommend that you issue separate +// requests for adding or removing replicas. // -// Returns awserr.Error for service API and SDK errors. Use runtime type assertions -// with awserr.Error's Code and Message methods to get detailed information about -// the error. 
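+// // For illustration only: a minimal sketch of adding one replica in a
+// // single request. ReplicaUpdates and CreateReplicaAction are the assumed
+// // input fields; the table name and Region below are placeholders.
+// out, err := client.UpdateGlobalTable(&dynamodb.UpdateGlobalTableInput{
+// GlobalTableName: aws.String("MyGlobalTable"),
+// ReplicaUpdates: []*dynamodb.ReplicaUpdate{
+// {Create: &dynamodb.CreateReplicaAction{RegionName: aws.String("us-west-2")}},
+// },
+// })
+// if err == nil { // out describes the updated global table
+// fmt.Println(out)
+// }
+//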
+// If global secondary indexes are specified, then the following conditions +// must also be met: // -// See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateTableReplicaAutoScaling for usage and error information. +// * The global secondary indexes must have the same name. // -// Returned Error Types: -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. -// -// * ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. -// -// * LimitExceededException -// There is no limit to the number of daily on-demand backups that can be taken. +// * The global secondary indexes must have the same hash key and sort key +// (if present). // -// Up to 50 simultaneous table operations are allowed per account. These operations -// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, -// and RestoreTableToPointInTime. +// * The global secondary indexes must have the same provisioned and maximum +// write capacity units. // -// The only exception is when you are creating a table with one or more secondary -// indexes. You can have up to 25 such requests running at a time; however, -// if the table or index specifications are complex, DynamoDB might temporarily -// reduce the number of concurrent operations. +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. // -// There is a soft account quota of 256 tables. +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateGlobalTable for usage and error information. // +// Returned Error Types: // * InternalServerError // An error occurred on the server side. // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableReplicaAutoScaling -func (c *DynamoDB) UpdateTableReplicaAutoScaling(input *UpdateTableReplicaAutoScalingInput) (*UpdateTableReplicaAutoScalingOutput, error) { - req, out := c.UpdateTableReplicaAutoScalingRequest(input) +// * GlobalTableNotFoundException +// The specified global table does not exist. +// +// * ReplicaAlreadyExistsException +// The specified replica is already part of the global table. +// +// * ReplicaNotFoundException +// The specified replica is no longer part of the global table. +// +// * TableNotFoundException +// A source table with the name TableName does not currently exist within the +// subscriber's account. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTable +func (c *DynamoDB) UpdateGlobalTable(input *UpdateGlobalTableInput) (*UpdateGlobalTableOutput, error) { + req, out := c.UpdateGlobalTableRequest(input) return out, req.Send() } -// UpdateTableReplicaAutoScalingWithContext is the same as UpdateTableReplicaAutoScaling with the addition of +// UpdateGlobalTableWithContext is the same as UpdateGlobalTable with the addition of // the ability to pass a context and additional request options. // -// See UpdateTableReplicaAutoScaling for details on how to use this API operation. +// See UpdateGlobalTable for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. 
If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) UpdateTableReplicaAutoScalingWithContext(ctx aws.Context, input *UpdateTableReplicaAutoScalingInput, opts ...request.Option) (*UpdateTableReplicaAutoScalingOutput, error) { - req, out := c.UpdateTableReplicaAutoScalingRequest(input) +func (c *DynamoDB) UpdateGlobalTableWithContext(ctx aws.Context, input *UpdateGlobalTableInput, opts ...request.Option) (*UpdateGlobalTableOutput, error) { + req, out := c.UpdateGlobalTableRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -const opUpdateTimeToLive = "UpdateTimeToLive" +const opUpdateGlobalTableSettings = "UpdateGlobalTableSettings" -// UpdateTimeToLiveRequest generates a "aws/request.Request" representing the -// client's request for the UpdateTimeToLive operation. The "output" return +// UpdateGlobalTableSettingsRequest generates a "aws/request.Request" representing the +// client's request for the UpdateGlobalTableSettings operation. The "output" return // value will be populated with the request's response once the request completes // successfully. // // Use "Send" method on the returned Request to send the API call to the service. // the "output" return value is not valid until after Send returns without error. // -// See UpdateTimeToLive for more information on using the UpdateTimeToLive +// See UpdateGlobalTableSettings for more information on using the UpdateGlobalTableSettings // API call, and error handling. // // This method is useful when you want to inject custom logic or configuration // into the SDK's request lifecycle. Such as custom headers, or retry logic. // // -// // Example sending a request using the UpdateTimeToLiveRequest method. -// req, resp := client.UpdateTimeToLiveRequest(params) +// // Example sending a request using the UpdateGlobalTableSettingsRequest method. +// req, resp := client.UpdateGlobalTableSettingsRequest(params) // // err := req.Send() // if err == nil { // resp is now filled // fmt.Println(resp) // } // -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive -func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *request.Request, output *UpdateTimeToLiveOutput) { +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings +func (c *DynamoDB) UpdateGlobalTableSettingsRequest(input *UpdateGlobalTableSettingsInput) (req *request.Request, output *UpdateGlobalTableSettingsOutput) { op := &request.Operation{ - Name: opUpdateTimeToLive, + Name: opUpdateGlobalTableSettings, HTTPMethod: "POST", HTTPPath: "/", } if input == nil { - input = &UpdateTimeToLiveInput{} + input = &UpdateGlobalTableSettingsInput{} } - output = &UpdateTimeToLiveOutput{} + output = &UpdateGlobalTableSettingsOutput{} req = c.newRequest(op, input, output) // if custom endpoint for the request is set to a non empty string, // we skip the endpoint discovery workflow. @@ -6206,53 +6440,26 @@ func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *r return } -// UpdateTimeToLive API operation for Amazon DynamoDB. -// -// The UpdateTimeToLive method enables or disables Time to Live (TTL) for the -// specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification. -// It can take up to one hour for the change to fully process. 
Any additional -// UpdateTimeToLive calls for the same table during this one hour duration result -// in a ValidationException. -// -// TTL compares the current time in epoch time format to the time stored in -// the TTL attribute of an item. If the epoch time value stored in the attribute -// is less than the current time, the item is marked as expired and subsequently -// deleted. -// -// The epoch time format is the number of seconds elapsed since 12:00:00 AM -// January 1, 1970 UTC. -// -// DynamoDB deletes expired items on a best-effort basis to ensure availability -// of throughput for other data operations. -// -// DynamoDB typically deletes expired items within two days of expiration. The -// exact duration within which an item gets deleted after expiration is specific -// to the nature of the workload. Items that have expired and not been deleted -// will still show up in reads, queries, and scans. -// -// As items are deleted, they are removed from any local secondary index and -// global secondary index immediately in the same eventually consistent way -// as a standard delete operation. +// UpdateGlobalTableSettings API operation for Amazon DynamoDB. // -// For more information, see Time To Live (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html) -// in the Amazon DynamoDB Developer Guide. +// Updates settings for a global table. // // Returns awserr.Error for service API and SDK errors. Use runtime type assertions // with awserr.Error's Code and Message methods to get detailed information about // the error. // // See the AWS API reference guide for Amazon DynamoDB's -// API operation UpdateTimeToLive for usage and error information. +// API operation UpdateGlobalTableSettings for usage and error information. // // Returned Error Types: -// * ResourceInUseException -// The operation conflicts with the resource's availability. For example, you -// attempted to recreate an existing table, or tried to delete a table currently -// in the CREATING state. +// * GlobalTableNotFoundException +// The specified global table does not exist. // -// * ResourceNotFoundException -// The operation tried to access a nonexistent table or index. The resource -// might not be specified correctly, or its status might not be ACTIVE. +// * ReplicaNotFoundException +// The specified replica is no longer part of the global table. +// +// * IndexNotFoundException +// The operation tried to access a nonexistent index. // // * LimitExceededException // There is no limit to the number of daily on-demand backups that can be taken. @@ -6268,90 +6475,636 @@ func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *r // // There is a soft account quota of 256 tables. // +// * ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// // * InternalServerError // An error occurred on the server side. 
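+//
+// // For illustration only: a minimal sketch of raising the provisioned
+// // write capacity on a global table. GlobalTableName and
+// // GlobalTableProvisionedWriteCapacityUnits are the assumed input fields;
+// // the values below are placeholders.
+// out, err := client.UpdateGlobalTableSettings(&dynamodb.UpdateGlobalTableSettingsInput{
+// GlobalTableName: aws.String("MyGlobalTable"),
+// GlobalTableProvisionedWriteCapacityUnits: aws.Int64(10),
+// })
+// if err == nil { // out lists the per-replica settings
+// fmt.Println(out)
+// }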
// -// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive -func (c *DynamoDB) UpdateTimeToLive(input *UpdateTimeToLiveInput) (*UpdateTimeToLiveOutput, error) { - req, out := c.UpdateTimeToLiveRequest(input) +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateGlobalTableSettings +func (c *DynamoDB) UpdateGlobalTableSettings(input *UpdateGlobalTableSettingsInput) (*UpdateGlobalTableSettingsOutput, error) { + req, out := c.UpdateGlobalTableSettingsRequest(input) return out, req.Send() } -// UpdateTimeToLiveWithContext is the same as UpdateTimeToLive with the addition of +// UpdateGlobalTableSettingsWithContext is the same as UpdateGlobalTableSettings with the addition of // the ability to pass a context and additional request options. // -// See UpdateTimeToLive for details on how to use this API operation. +// See UpdateGlobalTableSettings for details on how to use this API operation. // // The context must be non-nil and will be used for request cancellation. If // the context is nil a panic will occur. In the future the SDK may create // sub-contexts for http.Requests. See https://golang.org/pkg/context/ // for more information on using Contexts. -func (c *DynamoDB) UpdateTimeToLiveWithContext(ctx aws.Context, input *UpdateTimeToLiveInput, opts ...request.Option) (*UpdateTimeToLiveOutput, error) { - req, out := c.UpdateTimeToLiveRequest(input) +func (c *DynamoDB) UpdateGlobalTableSettingsWithContext(ctx aws.Context, input *UpdateGlobalTableSettingsInput, opts ...request.Option) (*UpdateGlobalTableSettingsOutput, error) { + req, out := c.UpdateGlobalTableSettingsRequest(input) req.SetContext(ctx) req.ApplyOptions(opts...) return out, req.Send() } -// Contains details of a table archival operation. -type ArchivalSummary struct { - _ struct{} `type:"structure"` - - // The Amazon Resource Name (ARN) of the backup the table was archived to, when - // applicable in the archival reason. If you wish to restore this backup to - // the same table name, you will need to delete the original table. - ArchivalBackupArn *string `min:"37" type:"string"` - - // The date and time when table archival was initiated by DynamoDB, in UNIX - // epoch time format. - ArchivalDateTime *time.Time `type:"timestamp"` - - // The reason DynamoDB archived the table. Currently, the only possible value - // is: - // - // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The table was archived due to - // the table's AWS KMS key being inaccessible for more than seven days. An - // On-Demand backup was created at the archival time. - ArchivalReason *string `type:"string"` -} +const opUpdateItem = "UpdateItem" -// String returns the string representation -func (s ArchivalSummary) String() string { - return awsutil.Prettify(s) -} +// UpdateItemRequest generates a "aws/request.Request" representing the +// client's request for the UpdateItem operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateItem for more information on using the UpdateItem +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateItemRequest method. 
+// req, resp := client.UpdateItemRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem +func (c *DynamoDB) UpdateItemRequest(input *UpdateItemInput) (req *request.Request, output *UpdateItemOutput) { + op := &request.Operation{ + Name: opUpdateItem, + HTTPMethod: "POST", + HTTPPath: "/", + } -// GoString returns the string representation -func (s ArchivalSummary) GoString() string { - return s.String() -} + if input == nil { + input = &UpdateItemInput{} + } -// SetArchivalBackupArn sets the ArchivalBackupArn field's value. -func (s *ArchivalSummary) SetArchivalBackupArn(v string) *ArchivalSummary { - s.ArchivalBackupArn = &v - return s -} + output = &UpdateItemOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. + if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } -// SetArchivalDateTime sets the ArchivalDateTime field's value. -func (s *ArchivalSummary) SetArchivalDateTime(v time.Time) *ArchivalSummary { - s.ArchivalDateTime = &v - return s -} + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } -// SetArchivalReason sets the ArchivalReason field's value. -func (s *ArchivalSummary) SetArchivalReason(v string) *ArchivalSummary { - s.ArchivalReason = &v - return s + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return } -// Represents an attribute for describing the key schema for the table and indexes. -type AttributeDefinition struct { - _ struct{} `type:"structure"` - - // A name for the attribute. - // - // AttributeName is a required field - AttributeName *string `min:"1" type:"string" required:"true"` - +// UpdateItem API operation for Amazon DynamoDB. +// +// Edits an existing item's attributes, or adds a new item to the table if it +// does not already exist. You can put, delete, or add attribute values. You +// can also perform a conditional update on an existing item (insert a new attribute +// name-value pair if it doesn't exist, or replace an existing name-value pair +// if it has certain expected attribute values). +// +// You can also return the item's attribute values in the same UpdateItem operation +// using the ReturnValues parameter. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateItem for usage and error information. +// +// Returned Error Types: +// * ConditionalCheckFailedException +// A condition specified in the operation could not be evaluated. +// +// * ProvisionedThroughputExceededException +// Your request rate is too high. The AWS SDKs for DynamoDB automatically retry +// requests that receive this exception. Your request is eventually successful, +// unless your retry queue is too large to finish. Reduce the frequency of requests +// and use exponential backoff. 
For more information, go to Error Retries and +// Exponential Backoff (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Programming.Errors.html#Programming.Errors.RetryAndBackoff) +// in the Amazon DynamoDB Developer Guide. +// +// * ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ItemCollectionSizeLimitExceededException +// An item collection is too large. This exception is only returned for tables +// that have one or more local secondary indexes. +// +// * TransactionConflictException +// Operation was rejected because there is an ongoing transaction for the item. +// +// * RequestLimitExceeded +// Throughput exceeds the current throughput quota for your account. Please +// contact AWS Support at AWS Support (https://aws.amazon.com/support) to request +// a quota increase. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateItem +func (c *DynamoDB) UpdateItem(input *UpdateItemInput) (*UpdateItemOutput, error) { + req, out := c.UpdateItemRequest(input) + return out, req.Send() +} + +// UpdateItemWithContext is the same as UpdateItem with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateItem for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateItemWithContext(ctx aws.Context, input *UpdateItemInput, opts ...request.Option) (*UpdateItemOutput, error) { + req, out := c.UpdateItemRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateTable = "UpdateTable" + +// UpdateTableRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTable operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateTable for more information on using the UpdateTable +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateTableRequest method. +// req, resp := client.UpdateTableRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable +func (c *DynamoDB) UpdateTableRequest(input *UpdateTableInput) (req *request.Request, output *UpdateTableOutput) { + op := &request.Operation{ + Name: opUpdateTable, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTableInput{} + } + + output = &UpdateTableOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// UpdateTable API operation for Amazon DynamoDB. +// +// Modifies the provisioned throughput settings, global secondary indexes, or +// DynamoDB Streams settings for a given table. +// +// You can only perform one of the following operations at once: +// +// * Modify the provisioned throughput settings of the table. +// +// * Enable or disable DynamoDB Streams on the table. +// +// * Remove a global secondary index from the table. +// +// * Create a new global secondary index on the table. After the index begins +// backfilling, you can use UpdateTable to perform other operations. +// +// UpdateTable is an asynchronous operation; while it is executing, the table +// status changes from ACTIVE to UPDATING. While it is UPDATING, you cannot +// issue another UpdateTable request. When the table returns to the ACTIVE state, +// the UpdateTable operation is complete. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateTable for usage and error information. +// +// Returned Error Types: +// * ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// * ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account quota of 256 tables. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTable +func (c *DynamoDB) UpdateTable(input *UpdateTableInput) (*UpdateTableOutput, error) { + req, out := c.UpdateTableRequest(input) + return out, req.Send() +} + +// UpdateTableWithContext is the same as UpdateTable with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTable for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. 
See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateTableWithContext(ctx aws.Context, input *UpdateTableInput, opts ...request.Option) (*UpdateTableOutput, error) { + req, out := c.UpdateTableRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateTableReplicaAutoScaling = "UpdateTableReplicaAutoScaling" + +// UpdateTableReplicaAutoScalingRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTableReplicaAutoScaling operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateTableReplicaAutoScaling for more information on using the UpdateTableReplicaAutoScaling +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateTableReplicaAutoScalingRequest method. +// req, resp := client.UpdateTableReplicaAutoScalingRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableReplicaAutoScaling +func (c *DynamoDB) UpdateTableReplicaAutoScalingRequest(input *UpdateTableReplicaAutoScalingInput) (req *request.Request, output *UpdateTableReplicaAutoScalingOutput) { + op := &request.Operation{ + Name: opUpdateTableReplicaAutoScaling, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTableReplicaAutoScalingInput{} + } + + output = &UpdateTableReplicaAutoScalingOutput{} + req = c.newRequest(op, input, output) + return +} + +// UpdateTableReplicaAutoScaling API operation for Amazon DynamoDB. +// +// Updates auto scaling settings on your global tables at once. +// +// This operation only applies to Version 2019.11.21 (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/globaltables.V2.html) +// of global tables. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateTableReplicaAutoScaling for usage and error information. +// +// Returned Error Types: +// * ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// * LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. 
You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account quota of 256 tables. +// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTableReplicaAutoScaling +func (c *DynamoDB) UpdateTableReplicaAutoScaling(input *UpdateTableReplicaAutoScalingInput) (*UpdateTableReplicaAutoScalingOutput, error) { + req, out := c.UpdateTableReplicaAutoScalingRequest(input) + return out, req.Send() +} + +// UpdateTableReplicaAutoScalingWithContext is the same as UpdateTableReplicaAutoScaling with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTableReplicaAutoScaling for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateTableReplicaAutoScalingWithContext(ctx aws.Context, input *UpdateTableReplicaAutoScalingInput, opts ...request.Option) (*UpdateTableReplicaAutoScalingOutput, error) { + req, out := c.UpdateTableReplicaAutoScalingRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +const opUpdateTimeToLive = "UpdateTimeToLive" + +// UpdateTimeToLiveRequest generates a "aws/request.Request" representing the +// client's request for the UpdateTimeToLive operation. The "output" return +// value will be populated with the request's response once the request completes +// successfully. +// +// Use "Send" method on the returned Request to send the API call to the service. +// the "output" return value is not valid until after Send returns without error. +// +// See UpdateTimeToLive for more information on using the UpdateTimeToLive +// API call, and error handling. +// +// This method is useful when you want to inject custom logic or configuration +// into the SDK's request lifecycle. Such as custom headers, or retry logic. +// +// +// // Example sending a request using the UpdateTimeToLiveRequest method. +// req, resp := client.UpdateTimeToLiveRequest(params) +// +// err := req.Send() +// if err == nil { // resp is now filled +// fmt.Println(resp) +// } +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive +func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *request.Request, output *UpdateTimeToLiveOutput) { + op := &request.Operation{ + Name: opUpdateTimeToLive, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &UpdateTimeToLiveInput{} + } + + output = &UpdateTimeToLiveOutput{} + req = c.newRequest(op, input, output) + // if custom endpoint for the request is set to a non empty string, + // we skip the endpoint discovery workflow. 
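+	// For illustration (the attribute name "ExpireAt" is hypothetical): once
+	// TTL is enabled on an attribute, callers write an epoch-seconds Number
+	// value into it, e.g.
+	//
+	//	"ExpireAt": &dynamodb.AttributeValue{
+	//		N: aws.String(strconv.FormatInt(time.Now().Add(24*time.Hour).Unix(), 10)),
+	//	}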
+ if req.Config.Endpoint == nil || *req.Config.Endpoint == "" { + if aws.BoolValue(req.Config.EnableEndpointDiscovery) { + de := discovererDescribeEndpoints{ + Required: false, + EndpointCache: c.endpointCache, + Params: map[string]*string{ + "op": aws.String(req.Operation.Name), + }, + Client: c, + } + + for k, v := range de.Params { + if v == nil { + delete(de.Params, k) + } + } + + req.Handlers.Build.PushFrontNamed(request.NamedHandler{ + Name: "crr.endpointdiscovery", + Fn: de.Handler, + }) + } + } + return +} + +// UpdateTimeToLive API operation for Amazon DynamoDB. +// +// The UpdateTimeToLive method enables or disables Time to Live (TTL) for the +// specified table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification. +// It can take up to one hour for the change to fully process. Any additional +// UpdateTimeToLive calls for the same table during this one hour duration result +// in a ValidationException. +// +// TTL compares the current time in epoch time format to the time stored in +// the TTL attribute of an item. If the epoch time value stored in the attribute +// is less than the current time, the item is marked as expired and subsequently +// deleted. +// +// The epoch time format is the number of seconds elapsed since 12:00:00 AM +// January 1, 1970 UTC. +// +// DynamoDB deletes expired items on a best-effort basis to ensure availability +// of throughput for other data operations. +// +// DynamoDB typically deletes expired items within two days of expiration. The +// exact duration within which an item gets deleted after expiration is specific +// to the nature of the workload. Items that have expired and not been deleted +// will still show up in reads, queries, and scans. +// +// As items are deleted, they are removed from any local secondary index and +// global secondary index immediately in the same eventually consistent way +// as a standard delete operation. +// +// For more information, see Time To Live (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/TTL.html) +// in the Amazon DynamoDB Developer Guide. +// +// Returns awserr.Error for service API and SDK errors. Use runtime type assertions +// with awserr.Error's Code and Message methods to get detailed information about +// the error. +// +// See the AWS API reference guide for Amazon DynamoDB's +// API operation UpdateTimeToLive for usage and error information. +// +// Returned Error Types: +// * ResourceInUseException +// The operation conflicts with the resource's availability. For example, you +// attempted to recreate an existing table, or tried to delete a table currently +// in the CREATING state. +// +// * ResourceNotFoundException +// The operation tried to access a nonexistent table or index. The resource +// might not be specified correctly, or its status might not be ACTIVE. +// +// * LimitExceededException +// There is no limit to the number of daily on-demand backups that can be taken. +// +// Up to 50 simultaneous table operations are allowed per account. These operations +// include CreateTable, UpdateTable, DeleteTable,UpdateTimeToLive, RestoreTableFromBackup, +// and RestoreTableToPointInTime. +// +// The only exception is when you are creating a table with one or more secondary +// indexes. You can have up to 25 such requests running at a time; however, +// if the table or index specifications are complex, DynamoDB might temporarily +// reduce the number of concurrent operations. +// +// There is a soft account quota of 256 tables. 
+// +// * InternalServerError +// An error occurred on the server side. +// +// See also, https://docs.aws.amazon.com/goto/WebAPI/dynamodb-2012-08-10/UpdateTimeToLive +func (c *DynamoDB) UpdateTimeToLive(input *UpdateTimeToLiveInput) (*UpdateTimeToLiveOutput, error) { + req, out := c.UpdateTimeToLiveRequest(input) + return out, req.Send() +} + +// UpdateTimeToLiveWithContext is the same as UpdateTimeToLive with the addition of +// the ability to pass a context and additional request options. +// +// See UpdateTimeToLive for details on how to use this API operation. +// +// The context must be non-nil and will be used for request cancellation. If +// the context is nil a panic will occur. In the future the SDK may create +// sub-contexts for http.Requests. See https://golang.org/pkg/context/ +// for more information on using Contexts. +func (c *DynamoDB) UpdateTimeToLiveWithContext(ctx aws.Context, input *UpdateTimeToLiveInput, opts ...request.Option) (*UpdateTimeToLiveOutput, error) { + req, out := c.UpdateTimeToLiveRequest(input) + req.SetContext(ctx) + req.ApplyOptions(opts...) + return out, req.Send() +} + +// Contains details of a table archival operation. +type ArchivalSummary struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) of the backup the table was archived to, when + // applicable in the archival reason. If you wish to restore this backup to + // the same table name, you will need to delete the original table. + ArchivalBackupArn *string `min:"37" type:"string"` + + // The date and time when table archival was initiated by DynamoDB, in UNIX + // epoch time format. + ArchivalDateTime *time.Time `type:"timestamp"` + + // The reason DynamoDB archived the table. Currently, the only possible value + // is: + // + // * INACCESSIBLE_ENCRYPTION_CREDENTIALS - The table was archived due to + // the table's AWS KMS key being inaccessible for more than seven days. An + // On-Demand backup was created at the archival time. + ArchivalReason *string `type:"string"` +} + +// String returns the string representation +func (s ArchivalSummary) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ArchivalSummary) GoString() string { + return s.String() +} + +// SetArchivalBackupArn sets the ArchivalBackupArn field's value. +func (s *ArchivalSummary) SetArchivalBackupArn(v string) *ArchivalSummary { + s.ArchivalBackupArn = &v + return s +} + +// SetArchivalDateTime sets the ArchivalDateTime field's value. +func (s *ArchivalSummary) SetArchivalDateTime(v time.Time) *ArchivalSummary { + s.ArchivalDateTime = &v + return s +} + +// SetArchivalReason sets the ArchivalReason field's value. +func (s *ArchivalSummary) SetArchivalReason(v string) *ArchivalSummary { + s.ArchivalReason = &v + return s +} + +// Represents an attribute for describing the key schema for the table and indexes. +type AttributeDefinition struct { + _ struct{} `type:"structure"` + + // A name for the attribute. + // + // AttributeName is a required field + AttributeName *string `min:"1" type:"string" required:"true"` + // The data type for the attribute, where: // // * S - the attribute is of type String @@ -7404,6 +8157,80 @@ func (s *BackupSummary) SetTableName(v string) *BackupSummary { return s } +type BatchExecuteStatementInput struct { + _ struct{} `type:"structure"` + + // The list of PartiQL statements representing the batch to run. 
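+	// For illustration (the table name "Music" is hypothetical), a batch
+	// might combine parameterized statements such as
+	//
+	//	SELECT * FROM Music WHERE Artist = ?
+	//	INSERT INTO Music VALUE {'Artist' : ?, 'SongTitle' : ?}
+	//
+	// with each statement's Parameters supplying values for its ? markers.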
+ // + // Statements is a required field + Statements []*BatchStatementRequest `min:"1" type:"list" required:"true"` +} + +// String returns the string representation +func (s BatchExecuteStatementInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchExecuteStatementInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *BatchExecuteStatementInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "BatchExecuteStatementInput"} + if s.Statements == nil { + invalidParams.Add(request.NewErrParamRequired("Statements")) + } + if s.Statements != nil && len(s.Statements) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Statements", 1)) + } + if s.Statements != nil { + for i, v := range s.Statements { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "Statements", i), err.(request.ErrInvalidParams)) + } + } + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatements sets the Statements field's value. +func (s *BatchExecuteStatementInput) SetStatements(v []*BatchStatementRequest) *BatchExecuteStatementInput { + s.Statements = v + return s +} + +type BatchExecuteStatementOutput struct { + _ struct{} `type:"structure"` + + // The response to each PartiQL statement in the batch. + Responses []*BatchStatementResponse `type:"list"` +} + +// String returns the string representation +func (s BatchExecuteStatementOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s BatchExecuteStatementOutput) GoString() string { + return s.String() +} + +// SetResponses sets the Responses field's value. +func (s *BatchExecuteStatementOutput) SetResponses(v []*BatchStatementResponse) *BatchExecuteStatementOutput { + s.Responses = v + return s +} + // Represents the input of a BatchGetItem operation. type BatchGetItemInput struct { _ struct{} `type:"structure"` @@ -7536,59 +8363,197 @@ type BatchGetItemOutput struct { // * CapacityUnits - The total number of capacity units consumed. ConsumedCapacity []*ConsumedCapacity `type:"list"` - // A map of table name to a list of items. Each object in Responses consists - // of a table name, along with a map of attribute data consisting of the data - // type and attribute value. - Responses map[string][]map[string]*AttributeValue `type:"map"` + // A map of table name to a list of items. Each object in Responses consists + // of a table name, along with a map of attribute data consisting of the data + // type and attribute value. + Responses map[string][]map[string]*AttributeValue `type:"map"` + + // A map of tables and their respective keys that were not processed with the + // current response. The UnprocessedKeys value is in the same form as RequestItems, + // so the value can be provided directly to a subsequent BatchGetItem operation. + // For more information, see RequestItems in the Request Parameters section. + // + // Each element consists of: + // + // * Keys - An array of primary key attribute values that define specific + // items in the table. + // + // * ProjectionExpression - One or more attributes to be retrieved from the + // table or index. By default, all attributes are returned. If a requested + // attribute is not found, it does not appear in the result. + // + // * ConsistentRead - The consistency of a read operation. 
If set to true,
+	// then a strongly consistent read is used; otherwise, an eventually consistent
+	// read is used.
+	//
+	// If there are no unprocessed keys remaining, the response contains an empty
+	// UnprocessedKeys map.
+	UnprocessedKeys map[string]*KeysAndAttributes `min:"1" type:"map"`
+}
+
+// String returns the string representation
+func (s BatchGetItemOutput) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchGetItemOutput) GoString() string {
+	return s.String()
+}
+
+// SetConsumedCapacity sets the ConsumedCapacity field's value.
+func (s *BatchGetItemOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchGetItemOutput {
+	s.ConsumedCapacity = v
+	return s
+}
+
+// SetResponses sets the Responses field's value.
+func (s *BatchGetItemOutput) SetResponses(v map[string][]map[string]*AttributeValue) *BatchGetItemOutput {
+	s.Responses = v
+	return s
+}
+
+// SetUnprocessedKeys sets the UnprocessedKeys field's value.
+func (s *BatchGetItemOutput) SetUnprocessedKeys(v map[string]*KeysAndAttributes) *BatchGetItemOutput {
+	s.UnprocessedKeys = v
+	return s
+}
+
+// An error associated with a statement in a PartiQL batch that was run.
+type BatchStatementError struct {
+	_ struct{} `type:"structure"`
+
+	// The error code associated with the failed PartiQL batch statement.
+	Code *string `type:"string" enum:"BatchStatementErrorCodeEnum"`
+
+	// The error message associated with the PartiQL batch response.
+	Message *string `type:"string"`
+}
+
+// String returns the string representation
+func (s BatchStatementError) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchStatementError) GoString() string {
+	return s.String()
+}
+
+// SetCode sets the Code field's value.
+func (s *BatchStatementError) SetCode(v string) *BatchStatementError {
+	s.Code = &v
+	return s
+}
+
+// SetMessage sets the Message field's value.
+func (s *BatchStatementError) SetMessage(v string) *BatchStatementError {
+	s.Message = &v
+	return s
+}
+
+// A PartiQL batch statement request.
+type BatchStatementRequest struct {
+	_ struct{} `type:"structure"`
+
+	// The read consistency of the PartiQL batch request.
+	ConsistentRead *bool `type:"boolean"`
+
+	// The parameters associated with a PartiQL statement in the batch request.
+	Parameters []*AttributeValue `min:"1" type:"list"`
+
+	// A valid PartiQL statement.
+	//
+	// Statement is a required field
+	Statement *string `min:"1" type:"string" required:"true"`
+}
+
+// String returns the string representation
+func (s BatchStatementRequest) String() string {
+	return awsutil.Prettify(s)
+}
+
+// GoString returns the string representation
+func (s BatchStatementRequest) GoString() string {
+	return s.String()
+}
+
+// Validate inspects the fields of the type to determine if they are valid.
+func (s *BatchStatementRequest) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "BatchStatementRequest"}
+	if s.Parameters != nil && len(s.Parameters) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Parameters", 1))
+	}
+	if s.Statement == nil {
+		invalidParams.Add(request.NewErrParamRequired("Statement"))
+	}
+	if s.Statement != nil && len(*s.Statement) < 1 {
+		invalidParams.Add(request.NewErrParamMinLen("Statement", 1))
+	}
+
+	if invalidParams.Len() > 0 {
+		return invalidParams
+	}
+	return nil
+}
+
+// SetConsistentRead sets the ConsistentRead field's value.
+func (s *BatchStatementRequest) SetConsistentRead(v bool) *BatchStatementRequest {
+	s.ConsistentRead = &v
+	return s
+}
+
+// SetParameters sets the Parameters field's value.
+func (s *BatchStatementRequest) SetParameters(v []*AttributeValue) *BatchStatementRequest {
+	s.Parameters = v
+	return s
+}
+
+// SetStatement sets the Statement field's value.
+func (s *BatchStatementRequest) SetStatement(v string) *BatchStatementRequest {
+	s.Statement = &v
+	return s
+}
-
-	// A map of tables and their respective keys that were not processed with the
-	// current response. The UnprocessedKeys value is in the same form as RequestItems,
-	// so the value can be provided directly to a subsequent BatchGetItem operation.
-	// For more information, see RequestItems in the Request Parameters section.
-	//
-	// Each element consists of:
-	//
-	// * Keys - An array of primary key attribute values that define specific
-	// items in the table.
-	//
-	// * ProjectionExpression - One or more attributes to be retrieved from the
-	// table or index. By default, all attributes are returned. If a requested
-	// attribute is not found, it does not appear in the result.
-	//
-	// * ConsistentRead - The consistency of a read operation. If set to true,
-	// then a strongly consistent read is used; otherwise, an eventually consistent
-	// read is used.
-	//
-	// If there are no unprocessed keys remaining, the response contains an empty
-	// UnprocessedKeys map.
-	UnprocessedKeys map[string]*KeysAndAttributes `min:"1" type:"map"`
+// A PartiQL batch statement response.
+type BatchStatementResponse struct {
+	_ struct{} `type:"structure"`
+
+	// The error associated with a failed PartiQL batch statement.
+	Error *BatchStatementError `type:"structure"`
+
+	// A DynamoDB item associated with a BatchStatementResponse.
+	Item map[string]*AttributeValue `type:"map"`
+
+	// The table name associated with a failed PartiQL batch statement.
+	TableName *string `min:"3" type:"string"`
 }
 
 // String returns the string representation
-func (s BatchGetItemOutput) String() string {
+func (s BatchStatementResponse) String() string {
 	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s BatchGetItemOutput) GoString() string {
+func (s BatchStatementResponse) GoString() string {
 	return s.String()
 }
 
-// SetConsumedCapacity sets the ConsumedCapacity field's value.
-func (s *BatchGetItemOutput) SetConsumedCapacity(v []*ConsumedCapacity) *BatchGetItemOutput {
-	s.ConsumedCapacity = v
+// SetError sets the Error field's value.
+func (s *BatchStatementResponse) SetError(v *BatchStatementError) *BatchStatementResponse {
+	s.Error = v
 	return s
 }
 
-// SetResponses sets the Responses field's value.
-func (s *BatchGetItemOutput) SetResponses(v map[string][]map[string]*AttributeValue) *BatchGetItemOutput {
-	s.Responses = v
+// SetItem sets the Item field's value.
+func (s *BatchStatementResponse) SetItem(v map[string]*AttributeValue) *BatchStatementResponse {
+	s.Item = v
 	return s
 }
 
-// SetUnprocessedKeys sets the UnprocessedKeys field's value.
-func (s *BatchGetItemOutput) SetUnprocessedKeys(v map[string]*KeysAndAttributes) *BatchGetItemOutput {
-	s.UnprocessedKeys = v
+// SetTableName sets the TableName field's value.
+func (s *BatchStatementResponse) SetTableName(v string) *BatchStatementResponse {
+	s.TableName = &v
 	return s
 }
 
@@ -9098,133 +10063,444 @@ func (s *CreateTableInput) Validate() error {
 	return nil
 }
 
-// SetAttributeDefinitions sets the AttributeDefinitions field's value.
-func (s *CreateTableInput) SetAttributeDefinitions(v []*AttributeDefinition) *CreateTableInput { - s.AttributeDefinitions = v - return s -} - -// SetBillingMode sets the BillingMode field's value. -func (s *CreateTableInput) SetBillingMode(v string) *CreateTableInput { - s.BillingMode = &v - return s -} - -// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. -func (s *CreateTableInput) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndex) *CreateTableInput { - s.GlobalSecondaryIndexes = v - return s -} - -// SetKeySchema sets the KeySchema field's value. -func (s *CreateTableInput) SetKeySchema(v []*KeySchemaElement) *CreateTableInput { - s.KeySchema = v - return s -} - -// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. -func (s *CreateTableInput) SetLocalSecondaryIndexes(v []*LocalSecondaryIndex) *CreateTableInput { - s.LocalSecondaryIndexes = v +// SetAttributeDefinitions sets the AttributeDefinitions field's value. +func (s *CreateTableInput) SetAttributeDefinitions(v []*AttributeDefinition) *CreateTableInput { + s.AttributeDefinitions = v + return s +} + +// SetBillingMode sets the BillingMode field's value. +func (s *CreateTableInput) SetBillingMode(v string) *CreateTableInput { + s.BillingMode = &v + return s +} + +// SetGlobalSecondaryIndexes sets the GlobalSecondaryIndexes field's value. +func (s *CreateTableInput) SetGlobalSecondaryIndexes(v []*GlobalSecondaryIndex) *CreateTableInput { + s.GlobalSecondaryIndexes = v + return s +} + +// SetKeySchema sets the KeySchema field's value. +func (s *CreateTableInput) SetKeySchema(v []*KeySchemaElement) *CreateTableInput { + s.KeySchema = v + return s +} + +// SetLocalSecondaryIndexes sets the LocalSecondaryIndexes field's value. +func (s *CreateTableInput) SetLocalSecondaryIndexes(v []*LocalSecondaryIndex) *CreateTableInput { + s.LocalSecondaryIndexes = v + return s +} + +// SetProvisionedThroughput sets the ProvisionedThroughput field's value. +func (s *CreateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateTableInput { + s.ProvisionedThroughput = v + return s +} + +// SetSSESpecification sets the SSESpecification field's value. +func (s *CreateTableInput) SetSSESpecification(v *SSESpecification) *CreateTableInput { + s.SSESpecification = v + return s +} + +// SetStreamSpecification sets the StreamSpecification field's value. +func (s *CreateTableInput) SetStreamSpecification(v *StreamSpecification) *CreateTableInput { + s.StreamSpecification = v + return s +} + +// SetTableName sets the TableName field's value. +func (s *CreateTableInput) SetTableName(v string) *CreateTableInput { + s.TableName = &v + return s +} + +// SetTags sets the Tags field's value. +func (s *CreateTableInput) SetTags(v []*Tag) *CreateTableInput { + s.Tags = v + return s +} + +// Represents the output of a CreateTable operation. +type CreateTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of the table. + TableDescription *TableDescription `type:"structure"` +} + +// String returns the string representation +func (s CreateTableOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s CreateTableOutput) GoString() string { + return s.String() +} + +// SetTableDescription sets the TableDescription field's value. +func (s *CreateTableOutput) SetTableDescription(v *TableDescription) *CreateTableOutput { + s.TableDescription = v + return s +} + +// Represents a request to perform a DeleteItem operation. 
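+//
+// For illustration (table and key values hypothetical), a transactional
+// delete carried in a TransactWriteItem might be built as
+//
+//	&dynamodb.Delete{
+//		TableName:           aws.String("Example"),
+//		Key:                 map[string]*dynamodb.AttributeValue{"PK": {S: aws.String("user#1")}},
+//		ConditionExpression: aws.String("attribute_exists(PK)"),
+//	}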
+type Delete struct { + _ struct{} `type:"structure"` + + // A condition that must be satisfied in order for a conditional delete to succeed. + ConditionExpression *string `type:"string"` + + // One or more substitution tokens for attribute names in an expression. + ExpressionAttributeNames map[string]*string `type:"map"` + + // One or more values that can be substituted in an expression. + ExpressionAttributeValues map[string]*AttributeValue `type:"map"` + + // The primary key of the item to be deleted. Each element consists of an attribute + // name and a value for that attribute. + // + // Key is a required field + Key map[string]*AttributeValue `type:"map" required:"true"` + + // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the + // Delete condition fails. For ReturnValuesOnConditionCheckFailure, the valid + // values are: NONE and ALL_OLD. + ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + + // Name of the table in which the item to be deleted resides. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s Delete) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s Delete) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *Delete) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "Delete"} + if s.Key == nil { + invalidParams.Add(request.NewErrParamRequired("Key")) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetConditionExpression sets the ConditionExpression field's value. +func (s *Delete) SetConditionExpression(v string) *Delete { + s.ConditionExpression = &v + return s +} + +// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. +func (s *Delete) SetExpressionAttributeNames(v map[string]*string) *Delete { + s.ExpressionAttributeNames = v + return s +} + +// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. +func (s *Delete) SetExpressionAttributeValues(v map[string]*AttributeValue) *Delete { + s.ExpressionAttributeValues = v + return s +} + +// SetKey sets the Key field's value. +func (s *Delete) SetKey(v map[string]*AttributeValue) *Delete { + s.Key = v + return s +} + +// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. +func (s *Delete) SetReturnValuesOnConditionCheckFailure(v string) *Delete { + s.ReturnValuesOnConditionCheckFailure = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *Delete) SetTableName(v string) *Delete { + s.TableName = &v + return s +} + +type DeleteBackupInput struct { + _ struct{} `type:"structure"` + + // The ARN associated with the backup. 
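+	// For illustration, a backup ARN has the general shape (all values
+	// hypothetical):
+	//
+	//	arn:aws:dynamodb:us-east-1:123456789012:table/Example/backup/01489602797149-73d8d5bc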
+ // + // BackupArn is a required field + BackupArn *string `min:"37" type:"string" required:"true"` +} + +// String returns the string representation +func (s DeleteBackupInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DeleteBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteBackupInput"} + if s.BackupArn == nil { + invalidParams.Add(request.NewErrParamRequired("BackupArn")) + } + if s.BackupArn != nil && len(*s.BackupArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupArn sets the BackupArn field's value. +func (s *DeleteBackupInput) SetBackupArn(v string) *DeleteBackupInput { + s.BackupArn = &v return s } -// SetProvisionedThroughput sets the ProvisionedThroughput field's value. -func (s *CreateTableInput) SetProvisionedThroughput(v *ProvisionedThroughput) *CreateTableInput { - s.ProvisionedThroughput = v - return s -} +type DeleteBackupOutput struct { + _ struct{} `type:"structure"` -// SetSSESpecification sets the SSESpecification field's value. -func (s *CreateTableInput) SetSSESpecification(v *SSESpecification) *CreateTableInput { - s.SSESpecification = v - return s + // Contains the description of the backup created for the table. + BackupDescription *BackupDescription `type:"structure"` } -// SetStreamSpecification sets the StreamSpecification field's value. -func (s *CreateTableInput) SetStreamSpecification(v *StreamSpecification) *CreateTableInput { - s.StreamSpecification = v - return s +// String returns the string representation +func (s DeleteBackupOutput) String() string { + return awsutil.Prettify(s) } -// SetTableName sets the TableName field's value. -func (s *CreateTableInput) SetTableName(v string) *CreateTableInput { - s.TableName = &v - return s +// GoString returns the string representation +func (s DeleteBackupOutput) GoString() string { + return s.String() } -// SetTags sets the Tags field's value. -func (s *CreateTableInput) SetTags(v []*Tag) *CreateTableInput { - s.Tags = v +// SetBackupDescription sets the BackupDescription field's value. +func (s *DeleteBackupOutput) SetBackupDescription(v *BackupDescription) *DeleteBackupOutput { + s.BackupDescription = v return s } -// Represents the output of a CreateTable operation. -type CreateTableOutput struct { +// Represents a global secondary index to be deleted from an existing table. +type DeleteGlobalSecondaryIndexAction struct { _ struct{} `type:"structure"` - // Represents the properties of the table. - TableDescription *TableDescription `type:"structure"` + // The name of the global secondary index to be deleted. + // + // IndexName is a required field + IndexName *string `min:"3" type:"string" required:"true"` } // String returns the string representation -func (s CreateTableOutput) String() string { +func (s DeleteGlobalSecondaryIndexAction) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s CreateTableOutput) GoString() string { +func (s DeleteGlobalSecondaryIndexAction) GoString() string { return s.String() } -// SetTableDescription sets the TableDescription field's value. 
-func (s *CreateTableOutput) SetTableDescription(v *TableDescription) *CreateTableOutput { - s.TableDescription = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteGlobalSecondaryIndexAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteGlobalSecondaryIndexAction"} + if s.IndexName == nil { + invalidParams.Add(request.NewErrParamRequired("IndexName")) + } + if s.IndexName != nil && len(*s.IndexName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetIndexName sets the IndexName field's value. +func (s *DeleteGlobalSecondaryIndexAction) SetIndexName(v string) *DeleteGlobalSecondaryIndexAction { + s.IndexName = &v return s } -// Represents a request to perform a DeleteItem operation. -type Delete struct { +// Represents the input of a DeleteItem operation. +type DeleteItemInput struct { _ struct{} `type:"structure"` - // A condition that must be satisfied in order for a conditional delete to succeed. + // A condition that must be satisfied in order for a conditional DeleteItem + // to succeed. + // + // An expression can contain any of the following: + // + // * Functions: attribute_exists | attribute_not_exists | attribute_type + // | contains | begins_with | size These function names are case-sensitive. + // + // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN + // + // * Logical operators: AND | OR | NOT + // + // For more information about condition expressions, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. ConditionExpression *string `type:"string"` - // One or more substitution tokens for attribute names in an expression. + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) + // in the Amazon DynamoDB Developer Guide. + ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` + + // This is a legacy parameter. Use ConditionExpression instead. For more information, + // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) + // in the Amazon DynamoDB Developer Guide. + Expected map[string]*ExpectedAttributeValue `type:"map"` + + // One or more substitution tokens for attribute names in an expression. The + // following are some use cases for using ExpressionAttributeNames: + // + // * To access an attribute whose name conflicts with a DynamoDB reserved + // word. + // + // * To create a placeholder for repeating occurrences of an attribute name + // in an expression. + // + // * To prevent special characters in an attribute name from being misinterpreted + // in an expression. + // + // Use the # character in an expression to dereference an attribute name. For + // example, consider the following attribute name: + // + // * Percentile + // + // The name of this attribute conflicts with a reserved word, so it cannot be + // used directly in an expression. (For the complete list of reserved words, + // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) + // in the Amazon DynamoDB Developer Guide). 
To work around this, you could specify + // the following for ExpressionAttributeNames: + // + // * {"#P":"Percentile"} + // + // You could then use this substitution in an expression, as in this example: + // + // * #P = :val + // + // Tokens that begin with the : character are expression attribute values, which + // are placeholders for the actual value at runtime. + // + // For more information on expression attribute names, see Specifying Item Attributes + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) + // in the Amazon DynamoDB Developer Guide. ExpressionAttributeNames map[string]*string `type:"map"` // One or more values that can be substituted in an expression. + // + // Use the : (colon) character in an expression to dereference an attribute + // value. For example, suppose that you wanted to check whether the value of + // the ProductStatus attribute was one of the following: + // + // Available | Backordered | Discontinued + // + // You would first need to specify ExpressionAttributeValues as follows: + // + // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} + // } + // + // You could then use these values in an expression, such as this: + // + // ProductStatus IN (:avail, :back, :disc) + // + // For more information on expression attribute values, see Condition Expressions + // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) + // in the Amazon DynamoDB Developer Guide. ExpressionAttributeValues map[string]*AttributeValue `type:"map"` - // The primary key of the item to be deleted. Each element consists of an attribute - // name and a value for that attribute. + // A map of attribute names to AttributeValue objects, representing the primary + // key of the item to delete. + // + // For the primary key, you must provide all of the attributes. For example, + // with a simple primary key, you only need to provide a value for the partition + // key. For a composite primary key, you must provide values for both the partition + // key and the sort key. // // Key is a required field Key map[string]*AttributeValue `type:"map" required:"true"` - // Use ReturnValuesOnConditionCheckFailure to get the item attributes if the - // Delete condition fails. For ReturnValuesOnConditionCheckFailure, the valid - // values are: NONE and ALL_OLD. - ReturnValuesOnConditionCheckFailure *string `type:"string" enum:"ReturnValuesOnConditionCheckFailure"` + // Determines the level of detail about provisioned throughput consumption that + // is returned in the response: + // + // * INDEXES - The response includes the aggregate ConsumedCapacity for the + // operation, together with ConsumedCapacity for each table and secondary + // index that was accessed. Note that some operations, such as GetItem and + // BatchGetItem, do not access any indexes at all. In these cases, specifying + // INDEXES will only return ConsumedCapacity information for table(s). + // + // * TOTAL - The response includes only the aggregate ConsumedCapacity for + // the operation. + // + // * NONE - No ConsumedCapacity details are included in the response. + ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` - // Name of the table in which the item to be deleted resides. + // Determines whether item collection metrics are returned. 
If set to SIZE, + // the response includes statistics about item collections, if any, that were + // modified during the operation are returned in the response. If set to NONE + // (the default), no statistics are returned. + ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` + + // Use ReturnValues if you want to get the item attributes as they appeared + // before they were deleted. For DeleteItem, the valid values are: + // + // * NONE - If ReturnValues is not specified, or if its value is NONE, then + // nothing is returned. (This setting is the default for ReturnValues.) + // + // * ALL_OLD - The content of the old item is returned. + // + // The ReturnValues parameter is used by several DynamoDB operations; however, + // DeleteItem does not recognize any values other than NONE or ALL_OLD. + ReturnValues *string `type:"string" enum:"ReturnValue"` + + // The name of the table from which to delete the item. // // TableName is a required field TableName *string `min:"3" type:"string" required:"true"` } // String returns the string representation -func (s Delete) String() string { +func (s DeleteItemInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Delete) GoString() string { +func (s DeleteItemInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *Delete) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "Delete"} +func (s *DeleteItemInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteItemInput"} if s.Key == nil { invalidParams.Add(request.NewErrParamRequired("Key")) } @@ -9241,134 +10517,196 @@ func (s *Delete) Validate() error { return nil } -// SetConditionExpression sets the ConditionExpression field's value. -func (s *Delete) SetConditionExpression(v string) *Delete { - s.ConditionExpression = &v +// SetConditionExpression sets the ConditionExpression field's value. +func (s *DeleteItemInput) SetConditionExpression(v string) *DeleteItemInput { + s.ConditionExpression = &v + return s +} + +// SetConditionalOperator sets the ConditionalOperator field's value. +func (s *DeleteItemInput) SetConditionalOperator(v string) *DeleteItemInput { + s.ConditionalOperator = &v + return s +} + +// SetExpected sets the Expected field's value. +func (s *DeleteItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *DeleteItemInput { + s.Expected = v return s } // SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. -func (s *Delete) SetExpressionAttributeNames(v map[string]*string) *Delete { +func (s *DeleteItemInput) SetExpressionAttributeNames(v map[string]*string) *DeleteItemInput { s.ExpressionAttributeNames = v return s } // SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. -func (s *Delete) SetExpressionAttributeValues(v map[string]*AttributeValue) *Delete { +func (s *DeleteItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *DeleteItemInput { s.ExpressionAttributeValues = v return s } // SetKey sets the Key field's value. -func (s *Delete) SetKey(v map[string]*AttributeValue) *Delete { +func (s *DeleteItemInput) SetKey(v map[string]*AttributeValue) *DeleteItemInput { s.Key = v return s } -// SetReturnValuesOnConditionCheckFailure sets the ReturnValuesOnConditionCheckFailure field's value. 
-func (s *Delete) SetReturnValuesOnConditionCheckFailure(v string) *Delete { - s.ReturnValuesOnConditionCheckFailure = &v +// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. +func (s *DeleteItemInput) SetReturnConsumedCapacity(v string) *DeleteItemInput { + s.ReturnConsumedCapacity = &v + return s +} + +// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. +func (s *DeleteItemInput) SetReturnItemCollectionMetrics(v string) *DeleteItemInput { + s.ReturnItemCollectionMetrics = &v + return s +} + +// SetReturnValues sets the ReturnValues field's value. +func (s *DeleteItemInput) SetReturnValues(v string) *DeleteItemInput { + s.ReturnValues = &v return s } // SetTableName sets the TableName field's value. -func (s *Delete) SetTableName(v string) *Delete { +func (s *DeleteItemInput) SetTableName(v string) *DeleteItemInput { s.TableName = &v return s } -type DeleteBackupInput struct { +// Represents the output of a DeleteItem operation. +type DeleteItemOutput struct { _ struct{} `type:"structure"` - // The ARN associated with the backup. + // A map of attribute names to AttributeValue objects, representing the item + // as it appeared before the DeleteItem operation. This map appears in the response + // only if ReturnValues was specified as ALL_OLD in the request. + Attributes map[string]*AttributeValue `type:"map"` + + // The capacity units consumed by the DeleteItem operation. The data returned + // includes the total provisioned throughput consumed, along with statistics + // for the table and any indexes involved in the operation. ConsumedCapacity + // is only returned if the ReturnConsumedCapacity parameter was specified. For + // more information, see Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) + // in the Amazon DynamoDB Developer Guide. + ConsumedCapacity *ConsumedCapacity `type:"structure"` + + // Information about item collections, if any, that were affected by the DeleteItem + // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics + // parameter was specified. If the table does not have any local secondary indexes, + // this information is not returned in the response. // - // BackupArn is a required field - BackupArn *string `min:"37" type:"string" required:"true"` + // Each ItemCollectionMetrics element consists of: + // + // * ItemCollectionKey - The partition key value of the item collection. + // This is the same as the partition key value of the item itself. + // + // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. + // This value is a two-element array containing a lower bound and an upper + // bound for the estimate. The estimate includes the size of all the items + // in the table, plus the size of all attributes projected into all of the + // local secondary indexes on that table. Use this estimate to measure whether + // a local secondary index is approaching its size limit. The estimate is + // subject to change over time; therefore, do not rely on the precision or + // accuracy of the estimate. 
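+	//
+	// For illustration (variable names hypothetical), a caller could log the
+	// bounds after a DeleteItem issued with ReturnItemCollectionMetrics set
+	// to SIZE:
+	//
+	//	if m := out.ItemCollectionMetrics; m != nil && len(m.SizeEstimateRangeGB) == 2 {
+	//		log.Printf("item collection between %.2f and %.2f GB",
+	//			*m.SizeEstimateRangeGB[0], *m.SizeEstimateRangeGB[1])
+	//	}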
+ ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` } // String returns the string representation -func (s DeleteBackupInput) String() string { +func (s DeleteItemOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBackupInput) GoString() string { +func (s DeleteItemOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteBackupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteBackupInput"} - if s.BackupArn == nil { - invalidParams.Add(request.NewErrParamRequired("BackupArn")) - } - if s.BackupArn != nil && len(*s.BackupArn) < 37 { - invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37)) - } +// SetAttributes sets the Attributes field's value. +func (s *DeleteItemOutput) SetAttributes(v map[string]*AttributeValue) *DeleteItemOutput { + s.Attributes = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetConsumedCapacity sets the ConsumedCapacity field's value. +func (s *DeleteItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *DeleteItemOutput { + s.ConsumedCapacity = v + return s } -// SetBackupArn sets the BackupArn field's value. -func (s *DeleteBackupInput) SetBackupArn(v string) *DeleteBackupInput { - s.BackupArn = &v +// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. +func (s *DeleteItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *DeleteItemOutput { + s.ItemCollectionMetrics = v return s } -type DeleteBackupOutput struct { +// Represents a replica to be removed. +type DeleteReplicaAction struct { _ struct{} `type:"structure"` - // Contains the description of the backup created for the table. - BackupDescription *BackupDescription `type:"structure"` + // The Region of the replica to be removed. + // + // RegionName is a required field + RegionName *string `type:"string" required:"true"` } // String returns the string representation -func (s DeleteBackupOutput) String() string { +func (s DeleteReplicaAction) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteBackupOutput) GoString() string { +func (s DeleteReplicaAction) GoString() string { return s.String() } -// SetBackupDescription sets the BackupDescription field's value. -func (s *DeleteBackupOutput) SetBackupDescription(v *BackupDescription) *DeleteBackupOutput { - s.BackupDescription = v +// Validate inspects the fields of the type to determine if they are valid. +func (s *DeleteReplicaAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReplicaAction"} + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetRegionName sets the RegionName field's value. +func (s *DeleteReplicaAction) SetRegionName(v string) *DeleteReplicaAction { + s.RegionName = &v return s } -// Represents a global secondary index to be deleted from an existing table. -type DeleteGlobalSecondaryIndexAction struct { +// Represents a replica to be deleted. +type DeleteReplicationGroupMemberAction struct { _ struct{} `type:"structure"` - // The name of the global secondary index to be deleted. + // The Region where the replica exists. 
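+	// (For example, "us-east-1" or "eu-west-1".)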
// - // IndexName is a required field - IndexName *string `min:"3" type:"string" required:"true"` + // RegionName is a required field + RegionName *string `type:"string" required:"true"` } // String returns the string representation -func (s DeleteGlobalSecondaryIndexAction) String() string { +func (s DeleteReplicationGroupMemberAction) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteGlobalSecondaryIndexAction) GoString() string { +func (s DeleteReplicationGroupMemberAction) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteGlobalSecondaryIndexAction) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteGlobalSecondaryIndexAction"} - if s.IndexName == nil { - invalidParams.Add(request.NewErrParamRequired("IndexName")) - } - if s.IndexName != nil && len(*s.IndexName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) +func (s *DeleteReplicationGroupMemberAction) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationGroupMemberAction"} + if s.RegionName == nil { + invalidParams.Add(request.NewErrParamRequired("RegionName")) } if invalidParams.Len() > 0 { @@ -9377,168 +10715,63 @@ func (s *DeleteGlobalSecondaryIndexAction) Validate() error { return nil } -// SetIndexName sets the IndexName field's value. -func (s *DeleteGlobalSecondaryIndexAction) SetIndexName(v string) *DeleteGlobalSecondaryIndexAction { - s.IndexName = &v - return s -} - -// Represents the input of a DeleteItem operation. -type DeleteItemInput struct { - _ struct{} `type:"structure"` - - // A condition that must be satisfied in order for a conditional DeleteItem - // to succeed. - // - // An expression can contain any of the following: - // - // * Functions: attribute_exists | attribute_not_exists | attribute_type - // | contains | begins_with | size These function names are case-sensitive. - // - // * Comparison operators: = | <> | < | > | <= | >= | BETWEEN | IN - // - // * Logical operators: AND | OR | NOT - // - // For more information about condition expressions, see Condition Expressions - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. - ConditionExpression *string `type:"string"` - - // This is a legacy parameter. Use ConditionExpression instead. For more information, - // see ConditionalOperator (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.ConditionalOperator.html) - // in the Amazon DynamoDB Developer Guide. - ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` - - // This is a legacy parameter. Use ConditionExpression instead. For more information, - // see Expected (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) - // in the Amazon DynamoDB Developer Guide. - Expected map[string]*ExpectedAttributeValue `type:"map"` - - // One or more substitution tokens for attribute names in an expression. The - // following are some use cases for using ExpressionAttributeNames: - // - // * To access an attribute whose name conflicts with a DynamoDB reserved - // word. - // - // * To create a placeholder for repeating occurrences of an attribute name - // in an expression. - // - // * To prevent special characters in an attribute name from being misinterpreted - // in an expression. 
- // - // Use the # character in an expression to dereference an attribute name. For - // example, consider the following attribute name: - // - // * Percentile - // - // The name of this attribute conflicts with a reserved word, so it cannot be - // used directly in an expression. (For the complete list of reserved words, - // see Reserved Words (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ReservedWords.html) - // in the Amazon DynamoDB Developer Guide). To work around this, you could specify - // the following for ExpressionAttributeNames: - // - // * {"#P":"Percentile"} - // - // You could then use this substitution in an expression, as in this example: - // - // * #P = :val - // - // Tokens that begin with the : character are expression attribute values, which - // are placeholders for the actual value at runtime. - // - // For more information on expression attribute names, see Specifying Item Attributes - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.AccessingItemAttributes.html) - // in the Amazon DynamoDB Developer Guide. - ExpressionAttributeNames map[string]*string `type:"map"` - - // One or more values that can be substituted in an expression. - // - // Use the : (colon) character in an expression to dereference an attribute - // value. For example, suppose that you wanted to check whether the value of - // the ProductStatus attribute was one of the following: - // - // Available | Backordered | Discontinued - // - // You would first need to specify ExpressionAttributeValues as follows: - // - // { ":avail":{"S":"Available"}, ":back":{"S":"Backordered"}, ":disc":{"S":"Discontinued"} - // } - // - // You could then use these values in an expression, such as this: - // - // ProductStatus IN (:avail, :back, :disc) - // - // For more information on expression attribute values, see Condition Expressions - // (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Expressions.SpecifyingConditions.html) - // in the Amazon DynamoDB Developer Guide. - ExpressionAttributeValues map[string]*AttributeValue `type:"map"` +// SetRegionName sets the RegionName field's value. +func (s *DeleteReplicationGroupMemberAction) SetRegionName(v string) *DeleteReplicationGroupMemberAction { + s.RegionName = &v + return s +} - // A map of attribute names to AttributeValue objects, representing the primary - // key of the item to delete. - // - // For the primary key, you must provide all of the attributes. For example, - // with a simple primary key, you only need to provide a value for the partition - // key. For a composite primary key, you must provide values for both the partition - // key and the sort key. +// Represents a request to perform a DeleteItem operation on an item. +type DeleteRequest struct { + _ struct{} `type:"structure"` + + // A map of attribute name to attribute values, representing the primary key + // of the item to delete. All of the table's primary key attributes must be + // specified, and their data types must match those of the table's key schema. // // Key is a required field Key map[string]*AttributeValue `type:"map" required:"true"` +} - // Determines the level of detail about provisioned throughput consumption that - // is returned in the response: - // - // * INDEXES - The response includes the aggregate ConsumedCapacity for the - // operation, together with ConsumedCapacity for each table and secondary - // index that was accessed. 
Note that some operations, such as GetItem and - // BatchGetItem, do not access any indexes at all. In these cases, specifying - // INDEXES will only return ConsumedCapacity information for table(s). - // - // * TOTAL - The response includes only the aggregate ConsumedCapacity for - // the operation. - // - // * NONE - No ConsumedCapacity details are included in the response. - ReturnConsumedCapacity *string `type:"string" enum:"ReturnConsumedCapacity"` +// String returns the string representation +func (s DeleteRequest) String() string { + return awsutil.Prettify(s) +} - // Determines whether item collection metrics are returned. If set to SIZE, - // the response includes statistics about item collections, if any, that were - // modified during the operation are returned in the response. If set to NONE - // (the default), no statistics are returned. - ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` +// GoString returns the string representation +func (s DeleteRequest) GoString() string { + return s.String() +} - // Use ReturnValues if you want to get the item attributes as they appeared - // before they were deleted. For DeleteItem, the valid values are: - // - // * NONE - If ReturnValues is not specified, or if its value is NONE, then - // nothing is returned. (This setting is the default for ReturnValues.) - // - // * ALL_OLD - The content of the old item is returned. - // - // The ReturnValues parameter is used by several DynamoDB operations; however, - // DeleteItem does not recognize any values other than NONE or ALL_OLD. - ReturnValues *string `type:"string" enum:"ReturnValue"` +// SetKey sets the Key field's value. +func (s *DeleteRequest) SetKey(v map[string]*AttributeValue) *DeleteRequest { + s.Key = v + return s +} - // The name of the table from which to delete the item. +// Represents the input of a DeleteTable operation. +type DeleteTableInput struct { + _ struct{} `type:"structure"` + + // The name of the table to delete. // // TableName is a required field TableName *string `min:"3" type:"string" required:"true"` } // String returns the string representation -func (s DeleteItemInput) String() string { +func (s DeleteTableInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteItemInput) GoString() string { +func (s DeleteTableInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteItemInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteItemInput"} - if s.Key == nil { - invalidParams.Add(request.NewErrParamRequired("Key")) - } +func (s *DeleteTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DeleteTableInput"} if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } @@ -9552,157 +10785,199 @@ func (s *DeleteItemInput) Validate() error { return nil } -// SetConditionExpression sets the ConditionExpression field's value. -func (s *DeleteItemInput) SetConditionExpression(v string) *DeleteItemInput { - s.ConditionExpression = &v +// SetTableName sets the TableName field's value. +func (s *DeleteTableInput) SetTableName(v string) *DeleteTableInput { + s.TableName = &v return s } -// SetConditionalOperator sets the ConditionalOperator field's value. 
-func (s *DeleteItemInput) SetConditionalOperator(v string) *DeleteItemInput { - s.ConditionalOperator = &v - return s +// Represents the output of a DeleteTable operation. +type DeleteTableOutput struct { + _ struct{} `type:"structure"` + + // Represents the properties of a table. + TableDescription *TableDescription `type:"structure"` } -// SetExpected sets the Expected field's value. -func (s *DeleteItemInput) SetExpected(v map[string]*ExpectedAttributeValue) *DeleteItemInput { - s.Expected = v - return s +// String returns the string representation +func (s DeleteTableOutput) String() string { + return awsutil.Prettify(s) } -// SetExpressionAttributeNames sets the ExpressionAttributeNames field's value. -func (s *DeleteItemInput) SetExpressionAttributeNames(v map[string]*string) *DeleteItemInput { - s.ExpressionAttributeNames = v - return s +// GoString returns the string representation +func (s DeleteTableOutput) GoString() string { + return s.String() } -// SetExpressionAttributeValues sets the ExpressionAttributeValues field's value. -func (s *DeleteItemInput) SetExpressionAttributeValues(v map[string]*AttributeValue) *DeleteItemInput { - s.ExpressionAttributeValues = v +// SetTableDescription sets the TableDescription field's value. +func (s *DeleteTableOutput) SetTableDescription(v *TableDescription) *DeleteTableOutput { + s.TableDescription = v return s } -// SetKey sets the Key field's value. -func (s *DeleteItemInput) SetKey(v map[string]*AttributeValue) *DeleteItemInput { - s.Key = v - return s +type DescribeBackupInput struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN) associated with the backup. + // + // BackupArn is a required field + BackupArn *string `min:"37" type:"string" required:"true"` } -// SetReturnConsumedCapacity sets the ReturnConsumedCapacity field's value. -func (s *DeleteItemInput) SetReturnConsumedCapacity(v string) *DeleteItemInput { - s.ReturnConsumedCapacity = &v - return s +// String returns the string representation +func (s DescribeBackupInput) String() string { + return awsutil.Prettify(s) } -// SetReturnItemCollectionMetrics sets the ReturnItemCollectionMetrics field's value. -func (s *DeleteItemInput) SetReturnItemCollectionMetrics(v string) *DeleteItemInput { - s.ReturnItemCollectionMetrics = &v +// GoString returns the string representation +func (s DescribeBackupInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeBackupInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeBackupInput"} + if s.BackupArn == nil { + invalidParams.Add(request.NewErrParamRequired("BackupArn")) + } + if s.BackupArn != nil && len(*s.BackupArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetBackupArn sets the BackupArn field's value. +func (s *DescribeBackupInput) SetBackupArn(v string) *DescribeBackupInput { + s.BackupArn = &v return s } -// SetReturnValues sets the ReturnValues field's value. -func (s *DeleteItemInput) SetReturnValues(v string) *DeleteItemInput { - s.ReturnValues = &v +type DescribeBackupOutput struct { + _ struct{} `type:"structure"` + + // Contains the description of the backup created for the table. 
+ BackupDescription *BackupDescription `type:"structure"` +} + +// String returns the string representation +func (s DescribeBackupOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeBackupOutput) GoString() string { + return s.String() +} + +// SetBackupDescription sets the BackupDescription field's value. +func (s *DescribeBackupOutput) SetBackupDescription(v *BackupDescription) *DescribeBackupOutput { + s.BackupDescription = v return s } +type DescribeContinuousBackupsInput struct { + _ struct{} `type:"structure"` + + // Name of the table for which the customer wants to check the continuous backups + // and point in time recovery settings. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` +} + +// String returns the string representation +func (s DescribeContinuousBackupsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeContinuousBackupsInput) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeContinuousBackupsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeContinuousBackupsInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + // SetTableName sets the TableName field's value. -func (s *DeleteItemInput) SetTableName(v string) *DeleteItemInput { +func (s *DescribeContinuousBackupsInput) SetTableName(v string) *DescribeContinuousBackupsInput { s.TableName = &v return s } -// Represents the output of a DeleteItem operation. -type DeleteItemOutput struct { +type DescribeContinuousBackupsOutput struct { _ struct{} `type:"structure"` - // A map of attribute names to AttributeValue objects, representing the item - // as it appeared before the DeleteItem operation. This map appears in the response - // only if ReturnValues was specified as ALL_OLD in the request. - Attributes map[string]*AttributeValue `type:"map"` - - // The capacity units consumed by the DeleteItem operation. The data returned - // includes the total provisioned throughput consumed, along with statistics - // for the table and any indexes involved in the operation. ConsumedCapacity - // is only returned if the ReturnConsumedCapacity parameter was specified. For - // more information, see Provisioned Mode (https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/ProvisionedThroughputIntro.html) - // in the Amazon DynamoDB Developer Guide. - ConsumedCapacity *ConsumedCapacity `type:"structure"` - - // Information about item collections, if any, that were affected by the DeleteItem - // operation. ItemCollectionMetrics is only returned if the ReturnItemCollectionMetrics - // parameter was specified. If the table does not have any local secondary indexes, - // this information is not returned in the response. - // - // Each ItemCollectionMetrics element consists of: - // - // * ItemCollectionKey - The partition key value of the item collection. - // This is the same as the partition key value of the item itself. - // - // * SizeEstimateRangeGB - An estimate of item collection size, in gigabytes. 
- // This value is a two-element array containing a lower bound and an upper - // bound for the estimate. The estimate includes the size of all the items - // in the table, plus the size of all attributes projected into all of the - // local secondary indexes on that table. Use this estimate to measure whether - // a local secondary index is approaching its size limit. The estimate is - // subject to change over time; therefore, do not rely on the precision or - // accuracy of the estimate. - ItemCollectionMetrics *ItemCollectionMetrics `type:"structure"` + // Represents the continuous backups and point in time recovery settings on + // the table. + ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"` } // String returns the string representation -func (s DeleteItemOutput) String() string { +func (s DescribeContinuousBackupsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteItemOutput) GoString() string { +func (s DescribeContinuousBackupsOutput) GoString() string { return s.String() } -// SetAttributes sets the Attributes field's value. -func (s *DeleteItemOutput) SetAttributes(v map[string]*AttributeValue) *DeleteItemOutput { - s.Attributes = v - return s -} - -// SetConsumedCapacity sets the ConsumedCapacity field's value. -func (s *DeleteItemOutput) SetConsumedCapacity(v *ConsumedCapacity) *DeleteItemOutput { - s.ConsumedCapacity = v - return s -} - -// SetItemCollectionMetrics sets the ItemCollectionMetrics field's value. -func (s *DeleteItemOutput) SetItemCollectionMetrics(v *ItemCollectionMetrics) *DeleteItemOutput { - s.ItemCollectionMetrics = v +// SetContinuousBackupsDescription sets the ContinuousBackupsDescription field's value. +func (s *DescribeContinuousBackupsOutput) SetContinuousBackupsDescription(v *ContinuousBackupsDescription) *DescribeContinuousBackupsOutput { + s.ContinuousBackupsDescription = v return s } -// Represents a replica to be removed. -type DeleteReplicaAction struct { +type DescribeContributorInsightsInput struct { _ struct{} `type:"structure"` - // The Region of the replica to be removed. + // The name of the global secondary index to describe, if applicable. + IndexName *string `min:"3" type:"string"` + + // The name of the table to describe. // - // RegionName is a required field - RegionName *string `type:"string" required:"true"` + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` } // String returns the string representation -func (s DeleteReplicaAction) String() string { +func (s DescribeContributorInsightsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteReplicaAction) GoString() string { +func (s DescribeContributorInsightsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteReplicaAction) Validate() error {
-	invalidParams := request.ErrInvalidParams{Context: "DeleteReplicaAction"}
-	if s.RegionName == nil {
-		invalidParams.Add(request.NewErrParamRequired("RegionName"))
+func (s *DescribeContributorInsightsInput) Validate() error {
+	invalidParams := request.ErrInvalidParams{Context: "DescribeContributorInsightsInput"}
+	if s.IndexName != nil && len(*s.IndexName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("IndexName", 3))
+	}
+	if s.TableName == nil {
+		invalidParams.Add(request.NewErrParamRequired("TableName"))
+	}
+	if s.TableName != nil && len(*s.TableName) < 3 {
+		invalidParams.Add(request.NewErrParamMinLen("TableName", 3))
 	}
 
 	if invalidParams.Len() > 0 {
@@ -9711,107 +10986,167 @@ func (s *DeleteReplicaAction) Validate() error {
 	return nil
 }
 
-// SetRegionName sets the RegionName field's value.
-func (s *DeleteReplicaAction) SetRegionName(v string) *DeleteReplicaAction {
-	s.RegionName = &v
+// SetIndexName sets the IndexName field's value.
+func (s *DescribeContributorInsightsInput) SetIndexName(v string) *DescribeContributorInsightsInput {
+	s.IndexName = &v
 	return s
 }
 
-// Represents a replica to be deleted.
-type DeleteReplicationGroupMemberAction struct {
+// SetTableName sets the TableName field's value.
+func (s *DescribeContributorInsightsInput) SetTableName(v string) *DescribeContributorInsightsInput {
+	s.TableName = &v
+	return s
+}
+
+type DescribeContributorInsightsOutput struct {
 	_ struct{} `type:"structure"`
 
-	// The Region where the replica exists.
+	// List of names of the associated contributor insights rules.
+	ContributorInsightsRuleList []*string `type:"list"`
+
+	// Current status of contributor insights.
+	ContributorInsightsStatus *string `type:"string" enum:"ContributorInsightsStatus"`
+
+	// Returns information about the last failure that was encountered.
 	//
-	// RegionName is a required field
-	RegionName *string `type:"string" required:"true"`
+	// The most common exceptions for a FAILED status are:
+	//
+	// * LimitExceededException - Per-account Amazon CloudWatch Contributor Insights
+	// rule limit reached. Please disable Contributor Insights for other tables/indexes
+	// OR disable Contributor Insights rules before retrying.
+	//
+	// * AccessDeniedException - Amazon CloudWatch Contributor Insights rules
+	// cannot be modified due to insufficient permissions.
+	//
+	// * AccessDeniedException - Failed to create service-linked role for Contributor
+	// Insights due to insufficient permissions.
+	//
+	// * InternalServerError - Failed to create Amazon CloudWatch Contributor
+	// Insights rules. Please retry request.
+	FailureException *FailureException `type:"structure"`
+
+	// The name of the global secondary index being described.
+	IndexName *string `min:"3" type:"string"`
+
+	// Timestamp of the last time the status was changed.
+	LastUpdateDateTime *time.Time `type:"timestamp"`
+
+	// The name of the table being described.
+	TableName *string `min:"3" type:"string"`
 }
 
 // String returns the string representation
-func (s DeleteReplicationGroupMemberAction) String() string {
+func (s DescribeContributorInsightsOutput) String() string {
	return awsutil.Prettify(s)
 }
 
 // GoString returns the string representation
-func (s DeleteReplicationGroupMemberAction) GoString() string {
+func (s DescribeContributorInsightsOutput) GoString() string {
	return s.String()
 }
 
-// Validate inspects the fields of the type to determine if they are valid.
-func (s *DeleteReplicationGroupMemberAction) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteReplicationGroupMemberAction"} - if s.RegionName == nil { - invalidParams.Add(request.NewErrParamRequired("RegionName")) - } +// SetContributorInsightsRuleList sets the ContributorInsightsRuleList field's value. +func (s *DescribeContributorInsightsOutput) SetContributorInsightsRuleList(v []*string) *DescribeContributorInsightsOutput { + s.ContributorInsightsRuleList = v + return s +} - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetContributorInsightsStatus sets the ContributorInsightsStatus field's value. +func (s *DescribeContributorInsightsOutput) SetContributorInsightsStatus(v string) *DescribeContributorInsightsOutput { + s.ContributorInsightsStatus = &v + return s } -// SetRegionName sets the RegionName field's value. -func (s *DeleteReplicationGroupMemberAction) SetRegionName(v string) *DeleteReplicationGroupMemberAction { - s.RegionName = &v +// SetFailureException sets the FailureException field's value. +func (s *DescribeContributorInsightsOutput) SetFailureException(v *FailureException) *DescribeContributorInsightsOutput { + s.FailureException = v return s } -// Represents a request to perform a DeleteItem operation on an item. -type DeleteRequest struct { +// SetIndexName sets the IndexName field's value. +func (s *DescribeContributorInsightsOutput) SetIndexName(v string) *DescribeContributorInsightsOutput { + s.IndexName = &v + return s +} + +// SetLastUpdateDateTime sets the LastUpdateDateTime field's value. +func (s *DescribeContributorInsightsOutput) SetLastUpdateDateTime(v time.Time) *DescribeContributorInsightsOutput { + s.LastUpdateDateTime = &v + return s +} + +// SetTableName sets the TableName field's value. +func (s *DescribeContributorInsightsOutput) SetTableName(v string) *DescribeContributorInsightsOutput { + s.TableName = &v + return s +} + +type DescribeEndpointsInput struct { _ struct{} `type:"structure"` +} - // A map of attribute name to attribute values, representing the primary key - // of the item to delete. All of the table's primary key attributes must be - // specified, and their data types must match those of the table's key schema. +// String returns the string representation +func (s DescribeEndpointsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeEndpointsInput) GoString() string { + return s.String() +} + +type DescribeEndpointsOutput struct { + _ struct{} `type:"structure"` + + // List of endpoints. // - // Key is a required field - Key map[string]*AttributeValue `type:"map" required:"true"` + // Endpoints is a required field + Endpoints []*Endpoint `type:"list" required:"true"` } // String returns the string representation -func (s DeleteRequest) String() string { +func (s DescribeEndpointsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteRequest) GoString() string { +func (s DescribeEndpointsOutput) GoString() string { return s.String() } -// SetKey sets the Key field's value. -func (s *DeleteRequest) SetKey(v map[string]*AttributeValue) *DeleteRequest { - s.Key = v +// SetEndpoints sets the Endpoints field's value. +func (s *DescribeEndpointsOutput) SetEndpoints(v []*Endpoint) *DescribeEndpointsOutput { + s.Endpoints = v return s } -// Represents the input of a DeleteTable operation. 
-type DeleteTableInput struct { +type DescribeExportInput struct { _ struct{} `type:"structure"` - // The name of the table to delete. + // The Amazon Resource Name (ARN) associated with the export. // - // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + // ExportArn is a required field + ExportArn *string `min:"37" type:"string" required:"true"` } // String returns the string representation -func (s DeleteTableInput) String() string { +func (s DescribeExportInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteTableInput) GoString() string { +func (s DescribeExportInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteTableInput"} - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) +func (s *DescribeExportInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeExportInput"} + if s.ExportArn == nil { + invalidParams.Add(request.NewErrParamRequired("ExportArn")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.ExportArn != nil && len(*s.ExportArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("ExportArn", 37)) } if invalidParams.Len() > 0 { @@ -9820,63 +11155,62 @@ func (s *DeleteTableInput) Validate() error { return nil } -// SetTableName sets the TableName field's value. -func (s *DeleteTableInput) SetTableName(v string) *DeleteTableInput { - s.TableName = &v +// SetExportArn sets the ExportArn field's value. +func (s *DescribeExportInput) SetExportArn(v string) *DescribeExportInput { + s.ExportArn = &v return s } -// Represents the output of a DeleteTable operation. -type DeleteTableOutput struct { +type DescribeExportOutput struct { _ struct{} `type:"structure"` - // Represents the properties of a table. - TableDescription *TableDescription `type:"structure"` + // Represents the properties of the export. + ExportDescription *ExportDescription `type:"structure"` } // String returns the string representation -func (s DeleteTableOutput) String() string { +func (s DescribeExportOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DeleteTableOutput) GoString() string { +func (s DescribeExportOutput) GoString() string { return s.String() } -// SetTableDescription sets the TableDescription field's value. -func (s *DeleteTableOutput) SetTableDescription(v *TableDescription) *DeleteTableOutput { - s.TableDescription = v +// SetExportDescription sets the ExportDescription field's value. +func (s *DescribeExportOutput) SetExportDescription(v *ExportDescription) *DescribeExportOutput { + s.ExportDescription = v return s } -type DescribeBackupInput struct { +type DescribeGlobalTableInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) associated with the backup. + // The name of the global table. 
// - // BackupArn is a required field - BackupArn *string `min:"37" type:"string" required:"true"` + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` } // String returns the string representation -func (s DescribeBackupInput) String() string { +func (s DescribeGlobalTableInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeBackupInput) GoString() string { +func (s DescribeGlobalTableInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeBackupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeBackupInput"} - if s.BackupArn == nil { - invalidParams.Add(request.NewErrParamRequired("BackupArn")) +func (s *DescribeGlobalTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableInput"} + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) } - if s.BackupArn != nil && len(*s.BackupArn) < 37 { - invalidParams.Add(request.NewErrParamMinLen("BackupArn", 37)) + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) } if invalidParams.Len() > 0 { @@ -9885,63 +11219,62 @@ func (s *DescribeBackupInput) Validate() error { return nil } -// SetBackupArn sets the BackupArn field's value. -func (s *DescribeBackupInput) SetBackupArn(v string) *DescribeBackupInput { - s.BackupArn = &v +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *DescribeGlobalTableInput) SetGlobalTableName(v string) *DescribeGlobalTableInput { + s.GlobalTableName = &v return s } -type DescribeBackupOutput struct { +type DescribeGlobalTableOutput struct { _ struct{} `type:"structure"` - // Contains the description of the backup created for the table. - BackupDescription *BackupDescription `type:"structure"` + // Contains the details of the global table. + GlobalTableDescription *GlobalTableDescription `type:"structure"` } // String returns the string representation -func (s DescribeBackupOutput) String() string { +func (s DescribeGlobalTableOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeBackupOutput) GoString() string { +func (s DescribeGlobalTableOutput) GoString() string { return s.String() } -// SetBackupDescription sets the BackupDescription field's value. -func (s *DescribeBackupOutput) SetBackupDescription(v *BackupDescription) *DescribeBackupOutput { - s.BackupDescription = v +// SetGlobalTableDescription sets the GlobalTableDescription field's value. +func (s *DescribeGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *DescribeGlobalTableOutput { + s.GlobalTableDescription = v return s } -type DescribeContinuousBackupsInput struct { +type DescribeGlobalTableSettingsInput struct { _ struct{} `type:"structure"` - // Name of the table for which the customer wants to check the continuous backups - // and point in time recovery settings. + // The name of the global table to describe. 
// - // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + // GlobalTableName is a required field + GlobalTableName *string `min:"3" type:"string" required:"true"` } // String returns the string representation -func (s DescribeContinuousBackupsInput) String() string { +func (s DescribeGlobalTableSettingsInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeContinuousBackupsInput) GoString() string { +func (s DescribeGlobalTableSettingsInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeContinuousBackupsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeContinuousBackupsInput"} - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) +func (s *DescribeGlobalTableSettingsInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableSettingsInput"} + if s.GlobalTableName == nil { + invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) } if invalidParams.Len() > 0 { @@ -9950,64 +11283,66 @@ func (s *DescribeContinuousBackupsInput) Validate() error { return nil } -// SetTableName sets the TableName field's value. -func (s *DescribeContinuousBackupsInput) SetTableName(v string) *DescribeContinuousBackupsInput { - s.TableName = &v +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *DescribeGlobalTableSettingsInput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsInput { + s.GlobalTableName = &v return s } -type DescribeContinuousBackupsOutput struct { +type DescribeGlobalTableSettingsOutput struct { _ struct{} `type:"structure"` - // Represents the continuous backups and point in time recovery settings on - // the table. - ContinuousBackupsDescription *ContinuousBackupsDescription `type:"structure"` + // The name of the global table. + GlobalTableName *string `min:"3" type:"string"` + + // The Region-specific settings for the global table. + ReplicaSettings []*ReplicaSettingsDescription `type:"list"` } // String returns the string representation -func (s DescribeContinuousBackupsOutput) String() string { +func (s DescribeGlobalTableSettingsOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeContinuousBackupsOutput) GoString() string { +func (s DescribeGlobalTableSettingsOutput) GoString() string { return s.String() } -// SetContinuousBackupsDescription sets the ContinuousBackupsDescription field's value. -func (s *DescribeContinuousBackupsOutput) SetContinuousBackupsDescription(v *ContinuousBackupsDescription) *DescribeContinuousBackupsOutput { - s.ContinuousBackupsDescription = v +// SetGlobalTableName sets the GlobalTableName field's value. +func (s *DescribeGlobalTableSettingsOutput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsOutput { + s.GlobalTableName = &v return s } -type DescribeContributorInsightsInput struct { - _ struct{} `type:"structure"` +// SetReplicaSettings sets the ReplicaSettings field's value. 
+func (s *DescribeGlobalTableSettingsOutput) SetReplicaSettings(v []*ReplicaSettingsDescription) *DescribeGlobalTableSettingsOutput { + s.ReplicaSettings = v + return s +} - // The name of the global secondary index to describe, if applicable. - IndexName *string `min:"3" type:"string"` +type DescribeKinesisStreamingDestinationInput struct { + _ struct{} `type:"structure"` - // The name of the table to describe. + // The name of the table being described. // // TableName is a required field TableName *string `min:"3" type:"string" required:"true"` } // String returns the string representation -func (s DescribeContributorInsightsInput) String() string { +func (s DescribeKinesisStreamingDestinationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeContributorInsightsInput) GoString() string { +func (s DescribeKinesisStreamingDestinationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeContributorInsightsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeContributorInsightsInput"} - if s.IndexName != nil && len(*s.IndexName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("IndexName", 3)) - } +func (s *DescribeKinesisStreamingDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeKinesisStreamingDestinationInput"} if s.TableName == nil { invalidParams.Add(request.NewErrParamRequired("TableName")) } @@ -10021,167 +11356,209 @@ func (s *DescribeContributorInsightsInput) Validate() error { return nil } -// SetIndexName sets the IndexName field's value. -func (s *DescribeContributorInsightsInput) SetIndexName(v string) *DescribeContributorInsightsInput { - s.IndexName = &v - return s -} - // SetTableName sets the TableName field's value. -func (s *DescribeContributorInsightsInput) SetTableName(v string) *DescribeContributorInsightsInput { +func (s *DescribeKinesisStreamingDestinationInput) SetTableName(v string) *DescribeKinesisStreamingDestinationInput { s.TableName = &v return s } -type DescribeContributorInsightsOutput struct { +type DescribeKinesisStreamingDestinationOutput struct { _ struct{} `type:"structure"` - // List of names of the associated Alpine rules. - ContributorInsightsRuleList []*string `type:"list"` - - // Current Status contributor insights. - ContributorInsightsStatus *string `type:"string" enum:"ContributorInsightsStatus"` - - // Returns information about the last failure that encountered. - // - // The most common exceptions for a FAILED status are: - // - // * LimitExceededException - Per-account Amazon CloudWatch Contributor Insights - // rule limit reached. Please disable Contributor Insights for other tables/indexes - // OR disable Contributor Insights rules before retrying. - // - // * AccessDeniedException - Amazon CloudWatch Contributor Insights rules - // cannot be modified due to insufficient permissions. - // - // * AccessDeniedException - Failed to create service-linked role for Contributor - // Insights due to insufficient permissions. - // - // * InternalServerError - Failed to create Amazon CloudWatch Contributor - // Insights rules. Please retry request. - FailureException *FailureException `type:"structure"` - - // The name of the global secondary index being described. - IndexName *string `min:"3" type:"string"` - - // Timestamp of the last time the status was changed. 
- LastUpdateDateTime *time.Time `type:"timestamp"` + // The list of replica structures for the table being described. + KinesisDataStreamDestinations []*KinesisDataStreamDestination `type:"list"` // The name of the table being described. TableName *string `min:"3" type:"string"` } // String returns the string representation -func (s DescribeContributorInsightsOutput) String() string { +func (s DescribeKinesisStreamingDestinationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeContributorInsightsOutput) GoString() string { +func (s DescribeKinesisStreamingDestinationOutput) GoString() string { return s.String() } -// SetContributorInsightsRuleList sets the ContributorInsightsRuleList field's value. -func (s *DescribeContributorInsightsOutput) SetContributorInsightsRuleList(v []*string) *DescribeContributorInsightsOutput { - s.ContributorInsightsRuleList = v +// SetKinesisDataStreamDestinations sets the KinesisDataStreamDestinations field's value. +func (s *DescribeKinesisStreamingDestinationOutput) SetKinesisDataStreamDestinations(v []*KinesisDataStreamDestination) *DescribeKinesisStreamingDestinationOutput { + s.KinesisDataStreamDestinations = v return s } -// SetContributorInsightsStatus sets the ContributorInsightsStatus field's value. -func (s *DescribeContributorInsightsOutput) SetContributorInsightsStatus(v string) *DescribeContributorInsightsOutput { - s.ContributorInsightsStatus = &v +// SetTableName sets the TableName field's value. +func (s *DescribeKinesisStreamingDestinationOutput) SetTableName(v string) *DescribeKinesisStreamingDestinationOutput { + s.TableName = &v return s } -// SetFailureException sets the FailureException field's value. -func (s *DescribeContributorInsightsOutput) SetFailureException(v *FailureException) *DescribeContributorInsightsOutput { - s.FailureException = v +// Represents the input of a DescribeLimits operation. Has no content. +type DescribeLimitsInput struct { + _ struct{} `type:"structure"` +} + +// String returns the string representation +func (s DescribeLimitsInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLimitsInput) GoString() string { + return s.String() +} + +// Represents the output of a DescribeLimits operation. +type DescribeLimitsOutput struct { + _ struct{} `type:"structure"` + + // The maximum total read capacity units that your account allows you to provision + // across all of your tables in this Region. + AccountMaxReadCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum total write capacity units that your account allows you to provision + // across all of your tables in this Region. + AccountMaxWriteCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum read capacity units that your account allows you to provision + // for a new table that you are creating in this Region, including the read + // capacity units provisioned for its global secondary indexes (GSIs). + TableMaxReadCapacityUnits *int64 `min:"1" type:"long"` + + // The maximum write capacity units that your account allows you to provision + // for a new table that you are creating in this Region, including the write + // capacity units provisioned for its global secondary indexes (GSIs). 
+ TableMaxWriteCapacityUnits *int64 `min:"1" type:"long"` +} + +// String returns the string representation +func (s DescribeLimitsOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeLimitsOutput) GoString() string { + return s.String() +} + +// SetAccountMaxReadCapacityUnits sets the AccountMaxReadCapacityUnits field's value. +func (s *DescribeLimitsOutput) SetAccountMaxReadCapacityUnits(v int64) *DescribeLimitsOutput { + s.AccountMaxReadCapacityUnits = &v return s } -// SetIndexName sets the IndexName field's value. -func (s *DescribeContributorInsightsOutput) SetIndexName(v string) *DescribeContributorInsightsOutput { - s.IndexName = &v +// SetAccountMaxWriteCapacityUnits sets the AccountMaxWriteCapacityUnits field's value. +func (s *DescribeLimitsOutput) SetAccountMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput { + s.AccountMaxWriteCapacityUnits = &v return s } -// SetLastUpdateDateTime sets the LastUpdateDateTime field's value. -func (s *DescribeContributorInsightsOutput) SetLastUpdateDateTime(v time.Time) *DescribeContributorInsightsOutput { - s.LastUpdateDateTime = &v +// SetTableMaxReadCapacityUnits sets the TableMaxReadCapacityUnits field's value. +func (s *DescribeLimitsOutput) SetTableMaxReadCapacityUnits(v int64) *DescribeLimitsOutput { + s.TableMaxReadCapacityUnits = &v return s } -// SetTableName sets the TableName field's value. -func (s *DescribeContributorInsightsOutput) SetTableName(v string) *DescribeContributorInsightsOutput { - s.TableName = &v +// SetTableMaxWriteCapacityUnits sets the TableMaxWriteCapacityUnits field's value. +func (s *DescribeLimitsOutput) SetTableMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput { + s.TableMaxWriteCapacityUnits = &v return s } -type DescribeEndpointsInput struct { +// Represents the input of a DescribeTable operation. +type DescribeTableInput struct { _ struct{} `type:"structure"` + + // The name of the table to describe. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` } // String returns the string representation -func (s DescribeEndpointsInput) String() string { +func (s DescribeTableInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeEndpointsInput) GoString() string { +func (s DescribeTableInput) GoString() string { return s.String() } -type DescribeEndpointsOutput struct { +// Validate inspects the fields of the type to determine if they are valid. +func (s *DescribeTableInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTableInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetTableName sets the TableName field's value. +func (s *DescribeTableInput) SetTableName(v string) *DescribeTableInput { + s.TableName = &v + return s +} + +// Represents the output of a DescribeTable operation. +type DescribeTableOutput struct { _ struct{} `type:"structure"` - // List of endpoints. - // - // Endpoints is a required field - Endpoints []*Endpoint `type:"list" required:"true"` + // The properties of the table. 
+ Table *TableDescription `type:"structure"` } // String returns the string representation -func (s DescribeEndpointsOutput) String() string { +func (s DescribeTableOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeEndpointsOutput) GoString() string { +func (s DescribeTableOutput) GoString() string { return s.String() } -// SetEndpoints sets the Endpoints field's value. -func (s *DescribeEndpointsOutput) SetEndpoints(v []*Endpoint) *DescribeEndpointsOutput { - s.Endpoints = v +// SetTable sets the Table field's value. +func (s *DescribeTableOutput) SetTable(v *TableDescription) *DescribeTableOutput { + s.Table = v return s } -type DescribeExportInput struct { +type DescribeTableReplicaAutoScalingInput struct { _ struct{} `type:"structure"` - // The Amazon Resource Name (ARN) associated with the export. + // The name of the table. // - // ExportArn is a required field - ExportArn *string `min:"37" type:"string" required:"true"` + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` } // String returns the string representation -func (s DescribeExportInput) String() string { +func (s DescribeTableReplicaAutoScalingInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeExportInput) GoString() string { +func (s DescribeTableReplicaAutoScalingInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeExportInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeExportInput"} - if s.ExportArn == nil { - invalidParams.Add(request.NewErrParamRequired("ExportArn")) +func (s *DescribeTableReplicaAutoScalingInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTableReplicaAutoScalingInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.ExportArn != nil && len(*s.ExportArn) < 37 { - invalidParams.Add(request.NewErrParamMinLen("ExportArn", 37)) + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) } if invalidParams.Len() > 0 { @@ -10190,62 +11567,62 @@ func (s *DescribeExportInput) Validate() error { return nil } -// SetExportArn sets the ExportArn field's value. -func (s *DescribeExportInput) SetExportArn(v string) *DescribeExportInput { - s.ExportArn = &v +// SetTableName sets the TableName field's value. +func (s *DescribeTableReplicaAutoScalingInput) SetTableName(v string) *DescribeTableReplicaAutoScalingInput { + s.TableName = &v return s } -type DescribeExportOutput struct { +type DescribeTableReplicaAutoScalingOutput struct { _ struct{} `type:"structure"` - // Represents the properties of the export. - ExportDescription *ExportDescription `type:"structure"` + // Represents the auto scaling properties of the table. + TableAutoScalingDescription *TableAutoScalingDescription `type:"structure"` } // String returns the string representation -func (s DescribeExportOutput) String() string { +func (s DescribeTableReplicaAutoScalingOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeExportOutput) GoString() string { +func (s DescribeTableReplicaAutoScalingOutput) GoString() string { return s.String() } -// SetExportDescription sets the ExportDescription field's value. 
-func (s *DescribeExportOutput) SetExportDescription(v *ExportDescription) *DescribeExportOutput { - s.ExportDescription = v +// SetTableAutoScalingDescription sets the TableAutoScalingDescription field's value. +func (s *DescribeTableReplicaAutoScalingOutput) SetTableAutoScalingDescription(v *TableAutoScalingDescription) *DescribeTableReplicaAutoScalingOutput { + s.TableAutoScalingDescription = v return s } -type DescribeGlobalTableInput struct { +type DescribeTimeToLiveInput struct { _ struct{} `type:"structure"` - // The name of the global table. + // The name of the table to be described. // - // GlobalTableName is a required field - GlobalTableName *string `min:"3" type:"string" required:"true"` + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` } // String returns the string representation -func (s DescribeGlobalTableInput) String() string { +func (s DescribeTimeToLiveInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeGlobalTableInput) GoString() string { +func (s DescribeTimeToLiveInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeGlobalTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableInput"} - if s.GlobalTableName == nil { - invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) +func (s *DescribeTimeToLiveInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DescribeTimeToLiveInput"} + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) } - if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) } if invalidParams.Len() > 0 { @@ -10254,62 +11631,73 @@ func (s *DescribeGlobalTableInput) Validate() error { return nil } -// SetGlobalTableName sets the GlobalTableName field's value. -func (s *DescribeGlobalTableInput) SetGlobalTableName(v string) *DescribeGlobalTableInput { - s.GlobalTableName = &v +// SetTableName sets the TableName field's value. +func (s *DescribeTimeToLiveInput) SetTableName(v string) *DescribeTimeToLiveInput { + s.TableName = &v return s } -type DescribeGlobalTableOutput struct { +type DescribeTimeToLiveOutput struct { _ struct{} `type:"structure"` - // Contains the details of the global table. - GlobalTableDescription *GlobalTableDescription `type:"structure"` + // The description of the Time to Live (TTL) status on the specified table. + TimeToLiveDescription *TimeToLiveDescription `type:"structure"` } // String returns the string representation -func (s DescribeGlobalTableOutput) String() string { +func (s DescribeTimeToLiveOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeGlobalTableOutput) GoString() string { +func (s DescribeTimeToLiveOutput) GoString() string { return s.String() } -// SetGlobalTableDescription sets the GlobalTableDescription field's value. -func (s *DescribeGlobalTableOutput) SetGlobalTableDescription(v *GlobalTableDescription) *DescribeGlobalTableOutput { - s.GlobalTableDescription = v +// SetTimeToLiveDescription sets the TimeToLiveDescription field's value. 
+func (s *DescribeTimeToLiveOutput) SetTimeToLiveDescription(v *TimeToLiveDescription) *DescribeTimeToLiveOutput { + s.TimeToLiveDescription = v return s } -type DescribeGlobalTableSettingsInput struct { +type DisableKinesisStreamingDestinationInput struct { _ struct{} `type:"structure"` - // The name of the global table to describe. + // The ARN for a Kinesis data stream. // - // GlobalTableName is a required field - GlobalTableName *string `min:"3" type:"string" required:"true"` + // StreamArn is a required field + StreamArn *string `min:"37" type:"string" required:"true"` + + // The name of the DynamoDB table. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` } // String returns the string representation -func (s DescribeGlobalTableSettingsInput) String() string { +func (s DisableKinesisStreamingDestinationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeGlobalTableSettingsInput) GoString() string { +func (s DisableKinesisStreamingDestinationInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeGlobalTableSettingsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeGlobalTableSettingsInput"} - if s.GlobalTableName == nil { - invalidParams.Add(request.NewErrParamRequired("GlobalTableName")) +func (s *DisableKinesisStreamingDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "DisableKinesisStreamingDestinationInput"} + if s.StreamArn == nil { + invalidParams.Add(request.NewErrParamRequired("StreamArn")) } - if s.GlobalTableName != nil && len(*s.GlobalTableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("GlobalTableName", 3)) + if s.StreamArn != nil && len(*s.StreamArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("StreamArn", 37)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) } if invalidParams.Len() > 0 { @@ -10318,209 +11706,296 @@ func (s *DescribeGlobalTableSettingsInput) Validate() error { return nil } -// SetGlobalTableName sets the GlobalTableName field's value. -func (s *DescribeGlobalTableSettingsInput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsInput { - s.GlobalTableName = &v +// SetStreamArn sets the StreamArn field's value. +func (s *DisableKinesisStreamingDestinationInput) SetStreamArn(v string) *DisableKinesisStreamingDestinationInput { + s.StreamArn = &v return s } -type DescribeGlobalTableSettingsOutput struct { +// SetTableName sets the TableName field's value. +func (s *DisableKinesisStreamingDestinationInput) SetTableName(v string) *DisableKinesisStreamingDestinationInput { + s.TableName = &v + return s +} + +type DisableKinesisStreamingDestinationOutput struct { _ struct{} `type:"structure"` - // The name of the global table. - GlobalTableName *string `min:"3" type:"string"` + // The current status of the replication. + DestinationStatus *string `type:"string" enum:"DestinationStatus"` - // The Region-specific settings for the global table. - ReplicaSettings []*ReplicaSettingsDescription `type:"list"` + // The ARN for the specific Kinesis data stream. + StreamArn *string `min:"37" type:"string"` + + // The name of the table being modified. 
+ TableName *string `min:"3" type:"string"` } // String returns the string representation -func (s DescribeGlobalTableSettingsOutput) String() string { +func (s DisableKinesisStreamingDestinationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeGlobalTableSettingsOutput) GoString() string { +func (s DisableKinesisStreamingDestinationOutput) GoString() string { return s.String() } -// SetGlobalTableName sets the GlobalTableName field's value. -func (s *DescribeGlobalTableSettingsOutput) SetGlobalTableName(v string) *DescribeGlobalTableSettingsOutput { - s.GlobalTableName = &v +// SetDestinationStatus sets the DestinationStatus field's value. +func (s *DisableKinesisStreamingDestinationOutput) SetDestinationStatus(v string) *DisableKinesisStreamingDestinationOutput { + s.DestinationStatus = &v return s } -// SetReplicaSettings sets the ReplicaSettings field's value. -func (s *DescribeGlobalTableSettingsOutput) SetReplicaSettings(v []*ReplicaSettingsDescription) *DescribeGlobalTableSettingsOutput { - s.ReplicaSettings = v +// SetStreamArn sets the StreamArn field's value. +func (s *DisableKinesisStreamingDestinationOutput) SetStreamArn(v string) *DisableKinesisStreamingDestinationOutput { + s.StreamArn = &v return s } -// Represents the input of a DescribeLimits operation. Has no content. -type DescribeLimitsInput struct { - _ struct{} `type:"structure"` +// SetTableName sets the TableName field's value. +func (s *DisableKinesisStreamingDestinationOutput) SetTableName(v string) *DisableKinesisStreamingDestinationOutput { + s.TableName = &v + return s +} + +// There was an attempt to insert an item with the same primary key as an item +// that already exists in the DynamoDB table. +type DuplicateItemException struct { + _ struct{} `type:"structure"` + RespMetadata protocol.ResponseMetadata `json:"-" xml:"-"` + + Message_ *string `locationName:"message" type:"string"` } // String returns the string representation -func (s DescribeLimitsInput) String() string { +func (s DuplicateItemException) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeLimitsInput) GoString() string { +func (s DuplicateItemException) GoString() string { return s.String() } -// Represents the output of a DescribeLimits operation. -type DescribeLimitsOutput struct { - _ struct{} `type:"structure"` +func newErrorDuplicateItemException(v protocol.ResponseMetadata) error { + return &DuplicateItemException{ + RespMetadata: v, + } +} - // The maximum total read capacity units that your account allows you to provision - // across all of your tables in this Region. - AccountMaxReadCapacityUnits *int64 `min:"1" type:"long"` +// Code returns the exception type name. +func (s *DuplicateItemException) Code() string { + return "DuplicateItemException" +} - // The maximum total write capacity units that your account allows you to provision - // across all of your tables in this Region. - AccountMaxWriteCapacityUnits *int64 `min:"1" type:"long"` +// Message returns the exception's message. +func (s *DuplicateItemException) Message() string { + if s.Message_ != nil { + return *s.Message_ + } + return "" +} - // The maximum read capacity units that your account allows you to provision - // for a new table that you are creating in this Region, including the read - // capacity units provisioned for its global secondary indexes (GSIs). 
- TableMaxReadCapacityUnits *int64 `min:"1" type:"long"` +// OrigErr always returns nil, satisfies awserr.Error interface. +func (s *DuplicateItemException) OrigErr() error { + return nil +} - // The maximum write capacity units that your account allows you to provision - // for a new table that you are creating in this Region, including the write - // capacity units provisioned for its global secondary indexes (GSIs). - TableMaxWriteCapacityUnits *int64 `min:"1" type:"long"` +func (s *DuplicateItemException) Error() string { + return fmt.Sprintf("%s: %s", s.Code(), s.Message()) +} + +// Status code returns the HTTP status code for the request's response error. +func (s *DuplicateItemException) StatusCode() int { + return s.RespMetadata.StatusCode +} + +// RequestID returns the service's response RequestID for request. +func (s *DuplicateItemException) RequestID() string { + return s.RespMetadata.RequestID +} + +type EnableKinesisStreamingDestinationInput struct { + _ struct{} `type:"structure"` + + // The ARN for a Kinesis data stream. + // + // StreamArn is a required field + StreamArn *string `min:"37" type:"string" required:"true"` + + // The name of the DynamoDB table. + // + // TableName is a required field + TableName *string `min:"3" type:"string" required:"true"` } // String returns the string representation -func (s DescribeLimitsOutput) String() string { +func (s EnableKinesisStreamingDestinationInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeLimitsOutput) GoString() string { +func (s EnableKinesisStreamingDestinationInput) GoString() string { return s.String() } -// SetAccountMaxReadCapacityUnits sets the AccountMaxReadCapacityUnits field's value. -func (s *DescribeLimitsOutput) SetAccountMaxReadCapacityUnits(v int64) *DescribeLimitsOutput { - s.AccountMaxReadCapacityUnits = &v - return s -} +// Validate inspects the fields of the type to determine if they are valid. +func (s *EnableKinesisStreamingDestinationInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "EnableKinesisStreamingDestinationInput"} + if s.StreamArn == nil { + invalidParams.Add(request.NewErrParamRequired("StreamArn")) + } + if s.StreamArn != nil && len(*s.StreamArn) < 37 { + invalidParams.Add(request.NewErrParamMinLen("StreamArn", 37)) + } + if s.TableName == nil { + invalidParams.Add(request.NewErrParamRequired("TableName")) + } + if s.TableName != nil && len(*s.TableName) < 3 { + invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + } -// SetAccountMaxWriteCapacityUnits sets the AccountMaxWriteCapacityUnits field's value. -func (s *DescribeLimitsOutput) SetAccountMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput { - s.AccountMaxWriteCapacityUnits = &v - return s + if invalidParams.Len() > 0 { + return invalidParams + } + return nil } -// SetTableMaxReadCapacityUnits sets the TableMaxReadCapacityUnits field's value. -func (s *DescribeLimitsOutput) SetTableMaxReadCapacityUnits(v int64) *DescribeLimitsOutput { - s.TableMaxReadCapacityUnits = &v +// SetStreamArn sets the StreamArn field's value. +func (s *EnableKinesisStreamingDestinationInput) SetStreamArn(v string) *EnableKinesisStreamingDestinationInput { + s.StreamArn = &v return s } -// SetTableMaxWriteCapacityUnits sets the TableMaxWriteCapacityUnits field's value. -func (s *DescribeLimitsOutput) SetTableMaxWriteCapacityUnits(v int64) *DescribeLimitsOutput { - s.TableMaxWriteCapacityUnits = &v +// SetTableName sets the TableName field's value. 
+func (s *EnableKinesisStreamingDestinationInput) SetTableName(v string) *EnableKinesisStreamingDestinationInput { + s.TableName = &v return s } -// Represents the input of a DescribeTable operation. -type DescribeTableInput struct { +type EnableKinesisStreamingDestinationOutput struct { _ struct{} `type:"structure"` - // The name of the table to describe. - // - // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + // The current status of the replication. + DestinationStatus *string `type:"string" enum:"DestinationStatus"` + + // The ARN for the specific Kinesis data stream. + StreamArn *string `min:"37" type:"string"` + + // The name of the table being modified. + TableName *string `min:"3" type:"string"` } // String returns the string representation -func (s DescribeTableInput) String() string { +func (s EnableKinesisStreamingDestinationOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTableInput) GoString() string { +func (s EnableKinesisStreamingDestinationOutput) GoString() string { return s.String() } -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeTableInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeTableInput"} - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) - } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil +// SetDestinationStatus sets the DestinationStatus field's value. +func (s *EnableKinesisStreamingDestinationOutput) SetDestinationStatus(v string) *EnableKinesisStreamingDestinationOutput { + s.DestinationStatus = &v + return s +} + +// SetStreamArn sets the StreamArn field's value. +func (s *EnableKinesisStreamingDestinationOutput) SetStreamArn(v string) *EnableKinesisStreamingDestinationOutput { + s.StreamArn = &v + return s } // SetTableName sets the TableName field's value. -func (s *DescribeTableInput) SetTableName(v string) *DescribeTableInput { +func (s *EnableKinesisStreamingDestinationOutput) SetTableName(v string) *EnableKinesisStreamingDestinationOutput { s.TableName = &v return s } -// Represents the output of a DescribeTable operation. -type DescribeTableOutput struct { +// Information about an endpoint. +type Endpoint struct { _ struct{} `type:"structure"` - // The properties of the table. - Table *TableDescription `type:"structure"` + // IP address of the endpoint. + // + // Address is a required field + Address *string `type:"string" required:"true"` + + // Endpoint cache time to live (TTL) value. + // + // CachePeriodInMinutes is a required field + CachePeriodInMinutes *int64 `type:"long" required:"true"` } // String returns the string representation -func (s DescribeTableOutput) String() string { +func (s Endpoint) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTableOutput) GoString() string { +func (s Endpoint) GoString() string { return s.String() } -// SetTable sets the Table field's value. -func (s *DescribeTableOutput) SetTable(v *TableDescription) *DescribeTableOutput { - s.Table = v +// SetAddress sets the Address field's value.
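[Editor's note: the EnableKinesisStreamingDestination input and output types added above are used roughly as follows. This is a minimal sketch, not part of the patch; the stream ARN and table name are placeholders, and the expectation that DestinationStatus starts out as ENABLING is an assumption based on the DestinationStatus enum added later in this file.]

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/dynamodb"
    )

    func enableStreaming() {
        svc := dynamodb.New(session.Must(session.NewSession()))

        // Placeholders: substitute a real Kinesis stream ARN and table name.
        out, err := svc.EnableKinesisStreamingDestination(&dynamodb.EnableKinesisStreamingDestinationInput{
            StreamArn: aws.String("arn:aws:kinesis:us-east-1:123456789012:stream/my-stream"),
            TableName: aws.String("my-table"),
        })
        if err != nil {
            log.Fatal(err)
        }

        // The call is asynchronous: expect ENABLING here, then poll
        // DescribeKinesisStreamingDestination until the status is ACTIVE.
        log.Println(aws.StringValue(out.DestinationStatus))
    }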
+func (s *Endpoint) SetAddress(v string) *Endpoint { + s.Address = &v return s } -type DescribeTableReplicaAutoScalingInput struct { +// SetCachePeriodInMinutes sets the CachePeriodInMinutes field's value. +func (s *Endpoint) SetCachePeriodInMinutes(v int64) *Endpoint { + s.CachePeriodInMinutes = &v + return s +} + +type ExecuteStatementInput struct { _ struct{} `type:"structure"` - // The name of the table. + // The consistency of a read operation. If set to true, then a strongly consistent + // read is used; otherwise, an eventually consistent read is used. + ConsistentRead *bool `type:"boolean"` + + // Set this value to get remaining results, if NextToken was returned in the + // statement response. + NextToken *string `min:"1" type:"string"` + + // The parameters for the PartiQL statement, if any. + Parameters []*AttributeValue `min:"1" type:"list"` + + // The PartiQL statement representing the operation to run. // - // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + // Statement is a required field + Statement *string `min:"1" type:"string" required:"true"` } // String returns the string representation -func (s DescribeTableReplicaAutoScalingInput) String() string { +func (s ExecuteStatementInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTableReplicaAutoScalingInput) GoString() string { +func (s ExecuteStatementInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeTableReplicaAutoScalingInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeTableReplicaAutoScalingInput"} - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) +func (s *ExecuteStatementInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExecuteStatementInput"} + if s.NextToken != nil && len(*s.NextToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.Parameters != nil && len(s.Parameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Parameters", 1)) + } + if s.Statement == nil { + invalidParams.Add(request.NewErrParamRequired("Statement")) + } + if s.Statement != nil && len(*s.Statement) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Statement", 1)) } if invalidParams.Len() > 0 { @@ -10529,62 +12004,110 @@ func (s *DescribeTableReplicaAutoScalingInput) Validate() error { return nil } -// SetTableName sets the TableName field's value. -func (s *DescribeTableReplicaAutoScalingInput) SetTableName(v string) *DescribeTableReplicaAutoScalingInput { - s.TableName = &v +// SetConsistentRead sets the ConsistentRead field's value. +func (s *ExecuteStatementInput) SetConsistentRead(v bool) *ExecuteStatementInput { + s.ConsistentRead = &v return s } -type DescribeTableReplicaAutoScalingOutput struct { +// SetNextToken sets the NextToken field's value. +func (s *ExecuteStatementInput) SetNextToken(v string) *ExecuteStatementInput { + s.NextToken = &v + return s +} + +// SetParameters sets the Parameters field's value. +func (s *ExecuteStatementInput) SetParameters(v []*AttributeValue) *ExecuteStatementInput { + s.Parameters = v + return s +} + +// SetStatement sets the Statement field's value. 
+func (s *ExecuteStatementInput) SetStatement(v string) *ExecuteStatementInput { + s.Statement = &v + return s +} + +type ExecuteStatementOutput struct { _ struct{} `type:"structure"` - // Represents the auto scaling properties of the table. - TableAutoScalingDescription *TableAutoScalingDescription `type:"structure"` + // If a read operation was used, this property will contain the result of the + // read operation: a map of attribute names and their values. For write + // operations, this value will be empty. + Items []map[string]*AttributeValue `type:"list"` + + // If the response of a read request exceeds the response payload limit, DynamoDB + // will set this value in the response. If set, you can use this value + // in a subsequent request to get the remaining results. + NextToken *string `min:"1" type:"string"` } // String returns the string representation -func (s DescribeTableReplicaAutoScalingOutput) String() string { +func (s ExecuteStatementOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTableReplicaAutoScalingOutput) GoString() string { +func (s ExecuteStatementOutput) GoString() string { return s.String() } -// SetTableAutoScalingDescription sets the TableAutoScalingDescription field's value. -func (s *DescribeTableReplicaAutoScalingOutput) SetTableAutoScalingDescription(v *TableAutoScalingDescription) *DescribeTableReplicaAutoScalingOutput { - s.TableAutoScalingDescription = v +// SetItems sets the Items field's value. +func (s *ExecuteStatementOutput) SetItems(v []map[string]*AttributeValue) *ExecuteStatementOutput { + s.Items = v return s } -type DescribeTimeToLiveInput struct { +// SetNextToken sets the NextToken field's value. +func (s *ExecuteStatementOutput) SetNextToken(v string) *ExecuteStatementOutput { + s.NextToken = &v + return s +} + +type ExecuteTransactionInput struct { _ struct{} `type:"structure"` - // The name of the table to be described. + // A token that makes the call to ExecuteTransaction idempotent: identical + // calls that carry the same token have the same effect as a single call. + ClientRequestToken *string `min:"1" type:"string" idempotencyToken:"true"` + + // The list of PartiQL statements representing the transaction to run. // - // TableName is a required field - TableName *string `min:"3" type:"string" required:"true"` + // TransactStatements is a required field + TransactStatements []*ParameterizedStatement `min:"1" type:"list" required:"true"` } // String returns the string representation -func (s DescribeTimeToLiveInput) String() string { +func (s ExecuteTransactionInput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s DescribeTimeToLiveInput) GoString() string { +func (s ExecuteTransactionInput) GoString() string { return s.String() } // Validate inspects the fields of the type to determine if they are valid.
-func (s *DescribeTimeToLiveInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeTimeToLiveInput"} - if s.TableName == nil { - invalidParams.Add(request.NewErrParamRequired("TableName")) +func (s *ExecuteTransactionInput) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ExecuteTransactionInput"} + if s.ClientRequestToken != nil && len(*s.ClientRequestToken) < 1 { + invalidParams.Add(request.NewErrParamMinLen("ClientRequestToken", 1)) } - if s.TableName != nil && len(*s.TableName) < 3 { - invalidParams.Add(request.NewErrParamMinLen("TableName", 3)) + if s.TransactStatements == nil { + invalidParams.Add(request.NewErrParamRequired("TransactStatements")) + } + if s.TransactStatements != nil && len(s.TransactStatements) < 1 { + invalidParams.Add(request.NewErrParamMinLen("TransactStatements", 1)) + } + if s.TransactStatements != nil { + for i, v := range s.TransactStatements { + if v == nil { + continue + } + if err := v.Validate(); err != nil { + invalidParams.AddNested(fmt.Sprintf("%s[%v]", "TransactStatements", i), err.(request.ErrInvalidParams)) + } + } } if invalidParams.Len() > 0 { @@ -10593,69 +12116,38 @@ func (s *DescribeTimeToLiveInput) Validate() error { return nil } -// SetTableName sets the TableName field's value. -func (s *DescribeTimeToLiveInput) SetTableName(v string) *DescribeTimeToLiveInput { - s.TableName = &v +// SetClientRequestToken sets the ClientRequestToken field's value. +func (s *ExecuteTransactionInput) SetClientRequestToken(v string) *ExecuteTransactionInput { + s.ClientRequestToken = &v return s } -type DescribeTimeToLiveOutput struct { - _ struct{} `type:"structure"` - - // The description of the Time to Live (TTL) status on the specified table. - TimeToLiveDescription *TimeToLiveDescription `type:"structure"` -} - -// String returns the string representation -func (s DescribeTimeToLiveOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeTimeToLiveOutput) GoString() string { - return s.String() -} - -// SetTimeToLiveDescription sets the TimeToLiveDescription field's value. -func (s *DescribeTimeToLiveOutput) SetTimeToLiveDescription(v *TimeToLiveDescription) *DescribeTimeToLiveOutput { - s.TimeToLiveDescription = v +// SetTransactStatements sets the TransactStatements field's value. +func (s *ExecuteTransactionInput) SetTransactStatements(v []*ParameterizedStatement) *ExecuteTransactionInput { + s.TransactStatements = v return s } -// An endpoint information details. -type Endpoint struct { +type ExecuteTransactionOutput struct { _ struct{} `type:"structure"` - // IP address of the endpoint. - // - // Address is a required field - Address *string `type:"string" required:"true"` - - // Endpoint cache time to live (TTL) value. - // - // CachePeriodInMinutes is a required field - CachePeriodInMinutes *int64 `type:"long" required:"true"` + // The response to a PartiQL transaction. + Responses []*ItemResponse `min:"1" type:"list"` } // String returns the string representation -func (s Endpoint) String() string { +func (s ExecuteTransactionOutput) String() string { return awsutil.Prettify(s) } // GoString returns the string representation -func (s Endpoint) GoString() string { +func (s ExecuteTransactionOutput) GoString() string { return s.String() } -// SetAddress sets the Address field's value. 
-func (s *Endpoint) SetAddress(v string) *Endpoint { - s.Address = &v - return s -} - -// SetCachePeriodInMinutes sets the CachePeriodInMinutes field's value. -func (s *Endpoint) SetCachePeriodInMinutes(v int64) *Endpoint { - s.CachePeriodInMinutes = &v +// SetResponses sets the Responses field's value. +func (s *ExecuteTransactionOutput) SetResponses(v []*ItemResponse) *ExecuteTransactionOutput { + s.Responses = v return s } @@ -13087,6 +14579,48 @@ func (s *KeysAndAttributes) SetProjectionExpression(v string) *KeysAndAttributes return s } +// Describes a Kinesis data stream destination. +type KinesisDataStreamDestination struct { + _ struct{} `type:"structure"` + + // The current status of replication. + DestinationStatus *string `type:"string" enum:"DestinationStatus"` + + // The human-readable string that corresponds to the replica status. + DestinationStatusDescription *string `type:"string"` + + // The ARN for a specific Kinesis data stream. + StreamArn *string `min:"37" type:"string"` +} + +// String returns the string representation +func (s KinesisDataStreamDestination) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s KinesisDataStreamDestination) GoString() string { + return s.String() +} + +// SetDestinationStatus sets the DestinationStatus field's value. +func (s *KinesisDataStreamDestination) SetDestinationStatus(v string) *KinesisDataStreamDestination { + s.DestinationStatus = &v + return s +} + +// SetDestinationStatusDescription sets the DestinationStatusDescription field's value. +func (s *KinesisDataStreamDestination) SetDestinationStatusDescription(v string) *KinesisDataStreamDestination { + s.DestinationStatusDescription = &v + return s +} + +// SetStreamArn sets the StreamArn field's value. +func (s *KinesisDataStreamDestination) SetStreamArn(v string) *KinesisDataStreamDestination { + s.StreamArn = &v + return s +} + // There is no limit to the number of daily on-demand backups that can be taken. // // Up to 50 simultaneous table operations are allowed per account. These operations @@ -14000,6 +15534,60 @@ func (s *LocalSecondaryIndexInfo) SetProjection(v *Projection) *LocalSecondaryIn return s } +// Represents a PartiQL statement that uses parameters. +type ParameterizedStatement struct { + _ struct{} `type:"structure"` + + // The parameter values. + Parameters []*AttributeValue `min:"1" type:"list"` + + // A PartiQL statement that uses parameters. + // + // Statement is a required field + Statement *string `min:"1" type:"string" required:"true"` +} + +// String returns the string representation +func (s ParameterizedStatement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ParameterizedStatement) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ParameterizedStatement) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ParameterizedStatement"} + if s.Parameters != nil && len(s.Parameters) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Parameters", 1)) + } + if s.Statement == nil { + invalidParams.Add(request.NewErrParamRequired("Statement")) + } + if s.Statement != nil && len(*s.Statement) < 1 { + invalidParams.Add(request.NewErrParamMinLen("Statement", 1)) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetParameters sets the Parameters field's value.
+func (s *ParameterizedStatement) SetParameters(v []*AttributeValue) *ParameterizedStatement { + s.Parameters = v + return s +} + +// SetStatement sets the Statement field's value. +func (s *ParameterizedStatement) SetStatement(v string) *ParameterizedStatement { + s.Statement = &v + return s +} + // The description of the point in time settings applied to the table. type PointInTimeRecoveryDescription struct { _ struct{} `type:"structure"` @@ -21049,6 +22637,58 @@ func BackupTypeFilter_Values() []string { } } +const ( + // BatchStatementErrorCodeEnumConditionalCheckFailed is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumConditionalCheckFailed = "ConditionalCheckFailed" + + // BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded = "ItemCollectionSizeLimitExceeded" + + // BatchStatementErrorCodeEnumRequestLimitExceeded is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumRequestLimitExceeded = "RequestLimitExceeded" + + // BatchStatementErrorCodeEnumValidationError is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumValidationError = "ValidationError" + + // BatchStatementErrorCodeEnumProvisionedThroughputExceeded is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumProvisionedThroughputExceeded = "ProvisionedThroughputExceeded" + + // BatchStatementErrorCodeEnumTransactionConflict is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumTransactionConflict = "TransactionConflict" + + // BatchStatementErrorCodeEnumThrottlingError is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumThrottlingError = "ThrottlingError" + + // BatchStatementErrorCodeEnumInternalServerError is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumInternalServerError = "InternalServerError" + + // BatchStatementErrorCodeEnumResourceNotFound is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumResourceNotFound = "ResourceNotFound" + + // BatchStatementErrorCodeEnumAccessDenied is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumAccessDenied = "AccessDenied" + + // BatchStatementErrorCodeEnumDuplicateItem is a BatchStatementErrorCodeEnum enum value + BatchStatementErrorCodeEnumDuplicateItem = "DuplicateItem" +) + +// BatchStatementErrorCodeEnum_Values returns all elements of the BatchStatementErrorCodeEnum enum +func BatchStatementErrorCodeEnum_Values() []string { + return []string{ + BatchStatementErrorCodeEnumConditionalCheckFailed, + BatchStatementErrorCodeEnumItemCollectionSizeLimitExceeded, + BatchStatementErrorCodeEnumRequestLimitExceeded, + BatchStatementErrorCodeEnumValidationError, + BatchStatementErrorCodeEnumProvisionedThroughputExceeded, + BatchStatementErrorCodeEnumTransactionConflict, + BatchStatementErrorCodeEnumThrottlingError, + BatchStatementErrorCodeEnumInternalServerError, + BatchStatementErrorCodeEnumResourceNotFound, + BatchStatementErrorCodeEnumAccessDenied, + BatchStatementErrorCodeEnumDuplicateItem, + } +} + const ( // BillingModeProvisioned is a BillingMode enum value BillingModeProvisioned = "PROVISIONED" @@ -21201,6 +22841,34 @@ func ContributorInsightsStatus_Values() []string { } } +const ( + // DestinationStatusEnabling is a DestinationStatus enum value + DestinationStatusEnabling = "ENABLING" + + // DestinationStatusActive is a DestinationStatus enum value + DestinationStatusActive = "ACTIVE" + + // 
DestinationStatusDisabling is a DestinationStatus enum value + DestinationStatusDisabling = "DISABLING" + + // DestinationStatusDisabled is a DestinationStatus enum value + DestinationStatusDisabled = "DISABLED" + + // DestinationStatusEnableFailed is a DestinationStatus enum value + DestinationStatusEnableFailed = "ENABLE_FAILED" +) + +// DestinationStatus_Values returns all elements of the DestinationStatus enum +func DestinationStatus_Values() []string { + return []string{ + DestinationStatusEnabling, + DestinationStatusActive, + DestinationStatusDisabling, + DestinationStatusDisabled, + DestinationStatusEnableFailed, + } +} + const ( // ExportFormatDynamodbJson is a ExportFormat enum value ExportFormatDynamodbJson = "DYNAMODB_JSON" diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go index d92f770ca1b..19f596f8e3e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface/interface.go @@ -26,7 +26,7 @@ import ( // // myFunc uses an SDK service client to make a request to // // Amazon DynamoDB. // func myFunc(svc dynamodbiface.DynamoDBAPI) bool { -// // Make svc.BatchGetItem request +// // Make svc.BatchExecuteStatement request // } // // func main() { @@ -42,7 +42,7 @@ import ( // type mockDynamoDBClient struct { // dynamodbiface.DynamoDBAPI // } -// func (m *mockDynamoDBClient) BatchGetItem(input *dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error) { +// func (m *mockDynamoDBClient) BatchExecuteStatement(input *dynamodb.BatchExecuteStatementInput) (*dynamodb.BatchExecuteStatementOutput, error) { // // mock response/functionality // } // @@ -60,6 +60,10 @@ import ( // and waiters. Its suggested to use the pattern above for testing, or using // tooling to generate mocks to satisfy the interfaces. 
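[Editor's note: since the updated doc comment above now centers its example on BatchExecuteStatement, here is a self-contained sketch of the same mocking pattern applied to the new PartiQL ExecuteStatement entry point. itemCount, mockDynamoDBClient, the table name, and the canned response are all illustrative, not part of the SDK.]

    import (
        "testing"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/service/dynamodb"
        "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
    )

    // itemCount issues a PartiQL statement through any DynamoDBAPI
    // implementation and reports how many items came back.
    func itemCount(svc dynamodbiface.DynamoDBAPI) (int, error) {
        out, err := svc.ExecuteStatement(&dynamodb.ExecuteStatementInput{
            Statement: aws.String(`SELECT * FROM "my-table" WHERE pk = 'a'`),
        })
        if err != nil {
            return 0, err
        }
        return len(out.Items), nil
    }

    // mockDynamoDBClient embeds the interface, so only the methods a test
    // overrides need real implementations; any other call panics at runtime.
    type mockDynamoDBClient struct {
        dynamodbiface.DynamoDBAPI
    }

    func (m *mockDynamoDBClient) ExecuteStatement(in *dynamodb.ExecuteStatementInput) (*dynamodb.ExecuteStatementOutput, error) {
        // Canned single-item response; no network call is made.
        return &dynamodb.ExecuteStatementOutput{
            Items: []map[string]*dynamodb.AttributeValue{
                {"pk": {S: aws.String("a")}},
            },
        }, nil
    }

    func TestItemCount(t *testing.T) {
        n, err := itemCount(&mockDynamoDBClient{})
        if err != nil || n != 1 {
            t.Fatalf("got (%d, %v), want (1, nil)", n, err)
        }
    }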
type DynamoDBAPI interface { + BatchExecuteStatement(*dynamodb.BatchExecuteStatementInput) (*dynamodb.BatchExecuteStatementOutput, error) + BatchExecuteStatementWithContext(aws.Context, *dynamodb.BatchExecuteStatementInput, ...request.Option) (*dynamodb.BatchExecuteStatementOutput, error) + BatchExecuteStatementRequest(*dynamodb.BatchExecuteStatementInput) (*request.Request, *dynamodb.BatchExecuteStatementOutput) + BatchGetItem(*dynamodb.BatchGetItemInput) (*dynamodb.BatchGetItemOutput, error) BatchGetItemWithContext(aws.Context, *dynamodb.BatchGetItemInput, ...request.Option) (*dynamodb.BatchGetItemOutput, error) BatchGetItemRequest(*dynamodb.BatchGetItemInput) (*request.Request, *dynamodb.BatchGetItemOutput) @@ -123,6 +127,10 @@ type DynamoDBAPI interface { DescribeGlobalTableSettingsWithContext(aws.Context, *dynamodb.DescribeGlobalTableSettingsInput, ...request.Option) (*dynamodb.DescribeGlobalTableSettingsOutput, error) DescribeGlobalTableSettingsRequest(*dynamodb.DescribeGlobalTableSettingsInput) (*request.Request, *dynamodb.DescribeGlobalTableSettingsOutput) + DescribeKinesisStreamingDestination(*dynamodb.DescribeKinesisStreamingDestinationInput) (*dynamodb.DescribeKinesisStreamingDestinationOutput, error) + DescribeKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.DescribeKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.DescribeKinesisStreamingDestinationOutput, error) + DescribeKinesisStreamingDestinationRequest(*dynamodb.DescribeKinesisStreamingDestinationInput) (*request.Request, *dynamodb.DescribeKinesisStreamingDestinationOutput) + DescribeLimits(*dynamodb.DescribeLimitsInput) (*dynamodb.DescribeLimitsOutput, error) DescribeLimitsWithContext(aws.Context, *dynamodb.DescribeLimitsInput, ...request.Option) (*dynamodb.DescribeLimitsOutput, error) DescribeLimitsRequest(*dynamodb.DescribeLimitsInput) (*request.Request, *dynamodb.DescribeLimitsOutput) @@ -139,6 +147,22 @@ type DynamoDBAPI interface { DescribeTimeToLiveWithContext(aws.Context, *dynamodb.DescribeTimeToLiveInput, ...request.Option) (*dynamodb.DescribeTimeToLiveOutput, error) DescribeTimeToLiveRequest(*dynamodb.DescribeTimeToLiveInput) (*request.Request, *dynamodb.DescribeTimeToLiveOutput) + DisableKinesisStreamingDestination(*dynamodb.DisableKinesisStreamingDestinationInput) (*dynamodb.DisableKinesisStreamingDestinationOutput, error) + DisableKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.DisableKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.DisableKinesisStreamingDestinationOutput, error) + DisableKinesisStreamingDestinationRequest(*dynamodb.DisableKinesisStreamingDestinationInput) (*request.Request, *dynamodb.DisableKinesisStreamingDestinationOutput) + + EnableKinesisStreamingDestination(*dynamodb.EnableKinesisStreamingDestinationInput) (*dynamodb.EnableKinesisStreamingDestinationOutput, error) + EnableKinesisStreamingDestinationWithContext(aws.Context, *dynamodb.EnableKinesisStreamingDestinationInput, ...request.Option) (*dynamodb.EnableKinesisStreamingDestinationOutput, error) + EnableKinesisStreamingDestinationRequest(*dynamodb.EnableKinesisStreamingDestinationInput) (*request.Request, *dynamodb.EnableKinesisStreamingDestinationOutput) + + ExecuteStatement(*dynamodb.ExecuteStatementInput) (*dynamodb.ExecuteStatementOutput, error) + ExecuteStatementWithContext(aws.Context, *dynamodb.ExecuteStatementInput, ...request.Option) (*dynamodb.ExecuteStatementOutput, error) + ExecuteStatementRequest(*dynamodb.ExecuteStatementInput) (*request.Request, 
*dynamodb.ExecuteStatementOutput) + + ExecuteTransaction(*dynamodb.ExecuteTransactionInput) (*dynamodb.ExecuteTransactionOutput, error) + ExecuteTransactionWithContext(aws.Context, *dynamodb.ExecuteTransactionInput, ...request.Option) (*dynamodb.ExecuteTransactionOutput, error) + ExecuteTransactionRequest(*dynamodb.ExecuteTransactionInput) (*request.Request, *dynamodb.ExecuteTransactionOutput) + ExportTableToPointInTime(*dynamodb.ExportTableToPointInTimeInput) (*dynamodb.ExportTableToPointInTimeOutput, error) ExportTableToPointInTimeWithContext(aws.Context, *dynamodb.ExportTableToPointInTimeInput, ...request.Option) (*dynamodb.ExportTableToPointInTimeOutput, error) ExportTableToPointInTimeRequest(*dynamodb.ExportTableToPointInTimeInput) (*request.Request, *dynamodb.ExportTableToPointInTimeOutput) diff --git a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go index 8a9f3485dfd..517229dee89 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go +++ b/vendor/github.com/aws/aws-sdk-go/service/dynamodb/errors.go @@ -33,6 +33,13 @@ const ( // Backups have not yet been enabled for this table. ErrCodeContinuousBackupsUnavailableException = "ContinuousBackupsUnavailableException" + // ErrCodeDuplicateItemException for service response error code + // "DuplicateItemException". + // + // There was an attempt to insert an item with the same primary key as an item + // that already exists in the DynamoDB table. + ErrCodeDuplicateItemException = "DuplicateItemException" + // ErrCodeExportConflictException for service response error code // "ExportConflictException". // @@ -292,6 +299,7 @@ var exceptionFromCode = map[string]func(protocol.ResponseMetadata) error{ "BackupNotFoundException": newErrorBackupNotFoundException, "ConditionalCheckFailedException": newErrorConditionalCheckFailedException, "ContinuousBackupsUnavailableException": newErrorContinuousBackupsUnavailableException, + "DuplicateItemException": newErrorDuplicateItemException, "ExportConflictException": newErrorExportConflictException, "ExportNotFoundException": newErrorExportNotFoundException, "GlobalTableAlreadyExistsException": newErrorGlobalTableAlreadyExistsException, diff --git a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go index 2ab5d1dad25..89a0a29afff 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/s3/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/s3/api.go @@ -392,21 +392,19 @@ func (c *S3) CopyObjectRequest(input *CopyObjectInput) (req *request.Request, ou // All headers with the x-amz- prefix, including x-amz-copy-source, must be // signed. // -// Encryption -// -// The source object that you are copying can be encrypted or unencrypted. The -// source object can be encrypted with server-side encryption using AWS managed -// encryption keys (SSE-S3 or SSE-KMS) or by using a customer-provided encryption -// key. With server-side encryption, Amazon S3 encrypts your data as it writes -// it to disks in its data centers and decrypts the data when you access it. -// -// You can optionally use the appropriate encryption-related headers to request -// server-side encryption for the target object. You have the option to provide -// your own encryption key or use SSE-S3 or SSE-KMS, regardless of the form -// of server-side encryption that was used to encrypt the source object. You -// can even request encryption if the source object was not encrypted. 
For more -// information about server-side encryption, see Using Server-Side Encryption -// (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// Server-side encryption +// +// When you perform a CopyObject operation, you can optionally use the appropriate +// encryption-related headers to encrypt the object using server-side encryption +// with AWS managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided +// encryption key. With server-side encryption, Amazon S3 encrypts your data +// as it writes it to disks in its data centers and decrypts the data when you +// access it. For more information about server-side encryption, see Using Server-Side +// Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html). +// +// If a target object uses SSE-KMS, you can enable an S3 Bucket Key for the +// object. For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon Simple Storage Service Developer Guide. // // Access Control List (ACL)-Specific Request Headers // @@ -4513,10 +4511,10 @@ func (c *S3) GetObjectRequest(input *GetObjectInput) (req *request.Request, outp // For more information about returning the ACL of an object, see GetObjectAcl // (https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html). // -// If the object you are retrieving is stored in the S3 Glacier, S3 Glacier -// Deep Archive, S3 Intelligent-Tiering Archive, or S3 Intelligent-Tiering Deep -// Archive storage classes, before you can retrieve the object you must first -// restore a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). +// If the object you are retrieving is stored in the S3 Glacier or S3 Glacier +// Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering +// Deep Archive tiers, before you can retrieve the object you must first restore +// a copy using RestoreObject (https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html). // Otherwise, this operation returns an InvalidObjectStateError error. For information // about restoring archived objects, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html). // @@ -7316,13 +7314,17 @@ func (c *S3) PutBucketEncryptionRequest(input *PutBucketEncryptionInput) (req *r // PutBucketEncryption API operation for Amazon Simple Storage Service. // -// This implementation of the PUT operation uses the encryption subresource -// to set the default encryption state of an existing bucket. +// This operation uses the encryption subresource to configure default encryption +// and Amazon S3 Bucket Key for an existing bucket. // -// This implementation of the PUT operation sets default encryption for a bucket -// using server-side encryption with Amazon S3-managed keys SSE-S3 or AWS KMS -// customer master keys (CMKs) (SSE-KMS). For information about the Amazon S3 -// default encryption feature, see Amazon S3 Default Bucket Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html). +// Default encryption for a bucket can use server-side encryption with Amazon +// S3-managed keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you +// specify default encryption using SSE-KMS, you can also configure Amazon S3 +// Bucket Key. 
For information about default encryption, see Amazon S3 default +// bucket encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html) +// in the Amazon Simple Storage Service Developer Guide. For more information +// about S3 Bucket Keys, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon Simple Storage Service Developer Guide. // // This operation requires AWS Signature Version 4. For more information, see // Authenticating Requests (AWS Signature Version 4) (sig-v4-authenticating-requests.html). @@ -8346,6 +8348,10 @@ func (c *S3) PutBucketOwnershipControlsRequest(input *PutBucketOwnershipControls output = &PutBucketOwnershipControlsOutput{} req = c.newRequest(op, input, output) req.Handlers.Unmarshal.Swap(restxml.UnmarshalHandler.Name, protocol.UnmarshalDiscardBodyHandler) + req.Handlers.Build.PushBackNamed(request.NamedHandler{ + Name: "contentMd5Handler", + Fn: checksum.AddBodyContentMD5Handler, + }) return } @@ -8551,15 +8557,14 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // permission. // // Specify the replication configuration in the request body. In the replication -// configuration, you provide the name of the destination bucket where you want -// Amazon S3 to replicate objects, the IAM role that Amazon S3 can assume to -// replicate objects on your behalf, and other relevant information. +// configuration, you provide the name of the destination bucket or buckets +// where you want Amazon S3 to replicate objects, the IAM role that Amazon S3 +// can assume to replicate objects on your behalf, and other relevant information. // // A replication configuration must include at least one rule, and can contain // a maximum of 1,000. Each rule identifies a subset of objects to replicate // by filtering the objects in the source bucket. To choose additional subsets -// of objects to replicate, add a rule for each subset. All rules must specify -// the same destination bucket. +// of objects to replicate, add a rule for each subset. // // To specify a subset of the objects in the source bucket to apply a replication // rule to, add the Filter element as a child of the Rule element. You can filter @@ -8567,12 +8572,9 @@ func (c *S3) PutBucketReplicationRequest(input *PutBucketReplicationInput) (req // When you add the Filter element in the configuration, you must also add the // following elements: DeleteMarkerReplication, Status, and Priority. // -// The latest version of the replication configuration XML is V2. XML V2 replication -// configurations are those that contain the Filter element for rules, and rules -// that specify S3 Replication Time Control (S3 RTC). In XML V2 replication -// configurations, Amazon S3 doesn't replicate delete markers. Therefore, you -// must set the DeleteMarkerReplication element to Disabled. For backward compatibility, -// Amazon S3 continues to support the XML V1 replication configuration. +// If you are using an earlier version of the replication configuration, Amazon +// S3 handles replication of delete markers differently. For more information, +// see Backward Compatibility (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations). // // For information about enabling versioning on a bucket, see Using Versioning // (https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html). 
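[Editor's note: to make the reworked PutBucketEncryption documentation above concrete, here is a minimal sketch of setting SSE-KMS default encryption together with the new S3 Bucket Key flag through this SDK version. The bucket name and KMS key alias are placeholders.]

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func enableDefaultEncryption() {
        svc := s3.New(session.Must(session.NewSession()))

        // Default new objects to SSE-KMS and opt in to S3 Bucket Key; the
        // BucketKeyEnabled field on ServerSideEncryptionRule is new in this update.
        _, err := svc.PutBucketEncryption(&s3.PutBucketEncryptionInput{
            Bucket: aws.String("my-bucket"), // placeholder
            ServerSideEncryptionConfiguration: &s3.ServerSideEncryptionConfiguration{
                Rules: []*s3.ServerSideEncryptionRule{{
                    ApplyServerSideEncryptionByDefault: &s3.ServerSideEncryptionByDefault{
                        SSEAlgorithm:   aws.String(s3.ServerSideEncryptionAwsKms),
                        KMSMasterKeyID: aws.String("alias/my-key"), // placeholder
                    },
                    BucketKeyEnabled: aws.Bool(true),
                }},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }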
@@ -9166,8 +9168,13 @@ func (c *S3) PutObjectRequest(input *PutObjectInput) (req *request.Request, outp // You can optionally request server-side encryption. With server-side encryption, // Amazon S3 encrypts your data as it writes it to disks in its data centers // and decrypts the data when you access it. You have the option to provide -// your own encryption key or use AWS managed encryption keys. For more information, -// see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). +// your own encryption key or use AWS managed encryption keys (SSE-S3 or SSE-KMS). +// For more information, see Using Server-Side Encryption (https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html). +// +// If you request server-side encryption using AWS Key Management Service (SSE-KMS), +// you can enable an S3 Bucket Key at the object-level. For more information, +// see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) +// in the Amazon Simple Storage Service Developer Guide. // // Access Control List (ACL)-Specific Request Headers // @@ -9999,16 +10006,17 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // * Amazon S3 accepts a select request even if the object has already been // restored. A select request doesn’t return error response 409. // -// Restoring Archives +// Restoring objects // -// Objects that you archive to the S3 Glacier, S3 Glacier Deep Archive, S3 Intelligent-Tiering -// Archive, or S3 Intelligent-Tiering Deep Archive storage classes are not accessible -// in real time. For objects in Archive Access tier or Deep Archive Access tier -// you must first initiate a restore request, and then wait until the object -// is moved into the Frequent Access tier. For objects in S3 Glacier or S3 Glacier -// Deep Archive you must first initiate a restore request, and then wait until -// a temporary copy of the object is available. To access an archived object, -// you must restore the object for the duration (number of days) that you specify. +// Objects that you archive to the S3 Glacier or S3 Glacier Deep Archive storage +// class, and S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep +// Archive tiers are not accessible in real time. For objects in Archive Access +// or Deep Archive Access tiers you must first initiate a restore request, and +// then wait until the object is moved into the Frequent Access tier. For objects +// in S3 Glacier or S3 Glacier Deep Archive storage classes you must first initiate +// a restore request, and then wait until a temporary copy of the object is +// available. To access an archived object, you must restore the object for +// the duration (number of days) that you specify. // // To restore a specific object version, you can provide a version ID. If you // don't provide a version ID, Amazon S3 restores the current version. @@ -10018,31 +10026,31 @@ func (c *S3) RestoreObjectRequest(input *RestoreObjectInput) (req *request.Reque // request body: // // * Expedited - Expedited retrievals allow you to quickly access your data -// stored in the S3 Glacier or S3 Intelligent-Tiering Archive storage class -// when occasional urgent requests for a subset of archives are required. +// stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive +// tier when occasional urgent requests for a subset of archives are required. 
// For all but the largest archived objects (250 MB+), data accessed using // Expedited retrievals is typically made available within 1–5 minutes. // Provisioned capacity ensures that retrieval capacity for Expedited retrievals // is available when you need it. Expedited retrievals and provisioned capacity -// are not available for objects stored in the S3 Glacier Deep Archive or -// S3 Intelligent-Tiering Deep Archive storage class. +// are not available for objects stored in the S3 Glacier Deep Archive storage +// class or S3 Intelligent-Tiering Deep Archive tier. // // * Standard - Standard retrievals allow you to access any of your archived // objects within several hours. This is the default option for retrieval // requests that do not specify the retrieval option. Standard retrievals // typically finish within 3–5 hours for objects stored in the S3 Glacier -// or S3 Intelligent-Tiering Archive storage class. They typically finish -// within 12 hours for objects stored in the S3 Glacier Deep Archive or S3 -// Intelligent-Tiering Deep Archive storage class. Standard retrievals are -// free for objects stored in S3 Intelligent-Tiering. +// storage class or S3 Intelligent-Tiering Archive tier. They typically finish +// within 12 hours for objects stored in the S3 Glacier Deep Archive storage +// class or S3 Intelligent-Tiering Deep Archive tier. Standard retrievals +// are free for objects stored in S3 Intelligent-Tiering. // // * Bulk - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, // enabling you to retrieve large amounts, even petabytes, of data inexpensively. // Bulk retrievals typically finish within 5–12 hours for objects stored -// in the S3 Glacier or S3 Intelligent-Tiering Archive storage class. They -// typically finish within 48 hours for objects stored in the S3 Glacier -// Deep Archive or S3 Intelligent-Tiering Deep Archive storage class. Bulk -// retrievals are free for objects stored in S3 Intelligent-Tiering. +// in the S3 Glacier storage class or S3 Intelligent-Tiering Archive tier. +// They typically finish within 48 hours for objects stored in the S3 Glacier +// Deep Archive storage class or S3 Intelligent-Tiering Deep Archive tier. +// Bulk retrievals are free for objects stored in S3 Intelligent-Tiering. // // For more information about archive retrieval options and provisioned capacity // for Expedited data access, see Restoring Archived Objects (https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html) @@ -11446,7 +11454,8 @@ func (s *AnalyticsS3BucketDestination) SetPrefix(v string) *AnalyticsS3BucketDes type Bucket struct { _ struct{} `type:"structure"` - // Date the bucket was created. + // Date the bucket was created. This date can change when making changes to + // your bucket, such as editing its bucket policy. CreationDate *time.Time `type:"timestamp"` // The name of the bucket. @@ -12134,6 +12143,10 @@ type CompleteMultipartUploadOutput struct { // in the Amazon Simple Storage Service Developer Guide. Bucket *string `type:"string"` + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + // Entity tag that identifies the newly created object's data. Objects with // different object data will have different entity tags. The entity tag is // an opaque string. 
The entity tag may or may not be an MD5 digest of the object @@ -12195,6 +12208,12 @@ func (s *CompleteMultipartUploadOutput) getBucket() (v string) { return *s.Bucket } +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CompleteMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CompleteMultipartUploadOutput { + s.BucketKeyEnabled = &v + return s +} + // SetETag sets the ETag field's value. func (s *CompleteMultipartUploadOutput) SetETag(v string) *CompleteMultipartUploadOutput { s.ETag = &v @@ -12408,6 +12427,15 @@ type CopyObjectInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using AWS KMS (SSE-KMS). Setting this header + // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with + // SSE-KMS. + // + // Specifying this header with a COPY operation doesn’t affect bucket-level + // settings for S3 Bucket Key. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` @@ -12656,6 +12684,12 @@ func (s *CopyObjectInput) getBucket() (v string) { return *s.Bucket } +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CopyObjectInput) SetBucketKeyEnabled(v bool) *CopyObjectInput { + s.BucketKeyEnabled = &v + return s +} + // SetCacheControl sets the CacheControl field's value. func (s *CopyObjectInput) SetCacheControl(v string) *CopyObjectInput { s.CacheControl = &v @@ -12922,6 +12956,10 @@ func (s CopyObjectInput) updateArnableField(v string) (interface{}, error) { type CopyObjectOutput struct { _ struct{} `type:"structure" payload:"CopyObjectResult"` + // Indicates whether the copied object uses an S3 Bucket Key for server-side + // encryption with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + // Container for all response elements. CopyObjectResult *CopyObjectResult `type:"structure"` @@ -12973,6 +13011,12 @@ func (s CopyObjectOutput) GoString() string { return s.String() } +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CopyObjectOutput) SetBucketKeyEnabled(v bool) *CopyObjectOutput { + s.BucketKeyEnabled = &v + return s +} + // SetCopyObjectResult sets the CopyObjectResult field's value. func (s *CopyObjectOutput) SetCopyObjectResult(v *CopyObjectResult) *CopyObjectOutput { s.CopyObjectResult = v @@ -13300,6 +13344,15 @@ type CreateMultipartUploadInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using AWS KMS (SSE-KMS). Setting this header + // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with + // SSE-KMS. + // + // Specifying this header with an object operation doesn’t affect bucket-level + // settings for S3 Bucket Key. + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + // Specifies caching behavior along the request/reply chain. 
CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` @@ -13470,6 +13523,12 @@ func (s *CreateMultipartUploadInput) getBucket() (v string) { return *s.Bucket } +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CreateMultipartUploadInput) SetBucketKeyEnabled(v bool) *CreateMultipartUploadInput { + s.BucketKeyEnabled = &v + return s +} + // SetCacheControl sets the CacheControl field's value. func (s *CreateMultipartUploadInput) SetCacheControl(v string) *CreateMultipartUploadInput { s.CacheControl = &v @@ -13697,6 +13756,10 @@ type CreateMultipartUploadOutput struct { // in the Amazon Simple Storage Service Developer Guide. Bucket *string `locationName:"Bucket" type:"string"` + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + // Object key for which the multipart upload was initiated. Key *string `min:"1" type:"string"` @@ -13767,6 +13830,12 @@ func (s *CreateMultipartUploadOutput) getBucket() (v string) { return *s.Bucket } +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *CreateMultipartUploadOutput) SetBucketKeyEnabled(v bool) *CreateMultipartUploadOutput { + s.BucketKeyEnabled = &v + return s +} + // SetKey sets the Key field's value. func (s *CreateMultipartUploadOutput) SetKey(v string) *CreateMultipartUploadOutput { s.Key = &v @@ -19494,7 +19563,7 @@ type GetObjectInput struct { ResponseContentType *string `location:"querystring" locationName:"response-content-type" type:"string"` // Sets the Expires header of the response. - ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp"` + ResponseExpires *time.Time `location:"querystring" locationName:"response-expires" type:"timestamp" timestampFormat:"rfc822"` // Specifies the algorithm to use to when encrypting the object (for example, // AES256). @@ -19990,6 +20059,10 @@ type GetObjectOutput struct { // Object data. Body io.ReadCloser `type:"blob"` + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + // Specifies caching behavior along the request/reply chain. CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` @@ -20128,6 +20201,12 @@ func (s *GetObjectOutput) SetBody(v io.ReadCloser) *GetObjectOutput { return s } +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *GetObjectOutput) SetBucketKeyEnabled(v bool) *GetObjectOutput { + s.BucketKeyEnabled = &v + return s +} + // SetCacheControl sets the CacheControl field's value. func (s *GetObjectOutput) SetCacheControl(v string) *GetObjectOutput { s.CacheControl = &v @@ -21435,6 +21514,10 @@ type HeadObjectOutput struct { // The archive state of the head object. ArchiveStatus *string `location:"header" locationName:"x-amz-archive-status" type:"string" enum:"ArchiveStatus"` + // Indicates whether the object uses an S3 Bucket Key for server-side encryption + // with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + // Specifies caching behavior along the request/reply chain. 
CacheControl *string `location:"header" locationName:"Cache-Control" type:"string"` @@ -21508,13 +21591,13 @@ type HeadObjectOutput struct { PartsCount *int64 `location:"header" locationName:"x-amz-mp-parts-count" type:"integer"` // Amazon S3 can return this header if your request involves a bucket that is - // either a source or destination in a replication rule. + // either a source or a destination in a replication rule. // // In replication, you have a source bucket on which you configure replication - // and destination bucket where Amazon S3 stores object replicas. When you request - // an object (GetObject) or object metadata (HeadObject) from these buckets, - // Amazon S3 will return the x-amz-replication-status header in the response - // as follows: + // and destination bucket or buckets where Amazon S3 stores object replicas. + // When you request an object (GetObject) or object metadata (HeadObject) from + // these buckets, Amazon S3 will return the x-amz-replication-status header + // in the response as follows: // // * If requesting an object from the source bucket — Amazon S3 will return // the x-amz-replication-status header if the object in your request is eligible @@ -21526,9 +21609,17 @@ type HeadObjectOutput struct { // header with value PENDING, COMPLETED or FAILED indicating object replication // status. // - // * If requesting an object from the destination bucket — Amazon S3 will + // * If requesting an object from a destination bucket — Amazon S3 will // return the x-amz-replication-status header with value REPLICA if the object - // in your request is a replica that Amazon S3 created. + // in your request is a replica that Amazon S3 created and there is no replica + // modification replication in progress. + // + // * When replicating objects to multiple destination buckets, the x-amz-replication-status + // header acts differently. The header of the source object will only return + // a value of COMPLETED when replication is successful to all destinations. + // The header will remain at value PENDING until replication has completed + // for all destinations. If one or more destinations fail replication, the + // header will return FAILED. // // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html). ReplicationStatus *string `location:"header" locationName:"x-amz-replication-status" type:"string" enum:"ReplicationStatus"` @@ -21612,6 +21703,12 @@ func (s *HeadObjectOutput) SetArchiveStatus(v string) *HeadObjectOutput { return s } +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *HeadObjectOutput) SetBucketKeyEnabled(v bool) *HeadObjectOutput { + s.BucketKeyEnabled = &v + return s +} + // SetCacheControl sets the CacheControl field's value. func (s *HeadObjectOutput) SetCacheControl(v string) *HeadObjectOutput { s.CacheControl = &v @@ -29645,6 +29742,15 @@ type PutObjectInput struct { // Bucket is a required field Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"` + // Specifies whether Amazon S3 should use an S3 Bucket Key for object encryption + // with server-side encryption using AWS KMS (SSE-KMS). Setting this header + // to true causes Amazon S3 to use an S3 Bucket Key for object encryption with + // SSE-KMS. + // + // Specifying this header with a PUT operation doesn’t affect bucket-level + // settings for S3 Bucket Key.
+ BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + // Can be used to specify caching behavior along the request/reply chain. For // more information, see http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9 // (http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.9). @@ -29861,6 +29967,12 @@ func (s *PutObjectInput) getBucket() (v string) { return *s.Bucket } +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *PutObjectInput) SetBucketKeyEnabled(v bool) *PutObjectInput { + s.BucketKeyEnabled = &v + return s +} + // SetCacheControl sets the CacheControl field's value. func (s *PutObjectInput) SetCacheControl(v string) *PutObjectInput { s.CacheControl = &v @@ -30374,6 +30486,10 @@ func (s *PutObjectLockConfigurationOutput) SetRequestCharged(v string) *PutObjec type PutObjectOutput struct { _ struct{} `type:"structure"` + // Indicates whether the uploaded object uses an S3 Bucket Key for server-side + // encryption with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + // Entity tag for the uploaded object. ETag *string `location:"header" locationName:"ETag" type:"string"` @@ -30429,6 +30545,12 @@ func (s PutObjectOutput) GoString() string { return s.String() } +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *PutObjectOutput) SetBucketKeyEnabled(v bool) *PutObjectOutput { + s.BucketKeyEnabled = &v + return s +} + // SetETag sets the ETag field's value. func (s *PutObjectOutput) SetETag(v string) *PutObjectOutput { s.ETag = &v @@ -31251,6 +31373,53 @@ func (s *RedirectAllRequestsTo) SetProtocol(v string) *RedirectAllRequestsTo { return s } +// A filter that you can specify for selecting modifications on replicas. +// Amazon S3 doesn't replicate replica modifications by default. In the latest +// version of replication configuration (when Filter is specified), you can +// specify this element and set the status to Enabled to replicate modifications +// on replicas. +// +// If you don't specify the Filter element, Amazon S3 assumes that the replication +// configuration is the earlier version, V1. In the earlier version, this element +// is not allowed. +type ReplicaModifications struct { + _ struct{} `type:"structure"` + + // Specifies whether Amazon S3 replicates modifications on replicas. + // + // Status is a required field + Status *string `type:"string" required:"true" enum:"ReplicaModificationsStatus"` +} + +// String returns the string representation +func (s ReplicaModifications) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ReplicaModifications) GoString() string { + return s.String() +} + +// Validate inspects the fields of the type to determine if they are valid. +func (s *ReplicaModifications) Validate() error { + invalidParams := request.ErrInvalidParams{Context: "ReplicaModifications"} + if s.Status == nil { + invalidParams.Add(request.NewErrParamRequired("Status")) + } + + if invalidParams.Len() > 0 { + return invalidParams + } + return nil +} + +// SetStatus sets the Status field's value. +func (s *ReplicaModifications) SetStatus(v string) *ReplicaModifications { + s.Status = &v + return s +} + // A container for replication rules. You can add up to 1,000 rules. The maximum // size of a replication configuration is 2 MB.
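[Editor's note: the ReplicaModifications type added above is wired into SourceSelectionCriteria further down in this file's diff. Below is a minimal sketch of a V2 replication rule that opts in, assuming the ReplicaModificationsStatus constants added elsewhere in this patch follow the usual Enabled/Disabled pattern; the bucket names, ARNs, and IAM role are placeholders.]

    import (
        "log"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func enableReplicaModificationSync() {
        svc := s3.New(session.Must(session.NewSession()))

        // A V2 rule: Filter is present, DeleteMarkerReplication is explicit, and
        // the new ReplicaModifications element is enabled.
        _, err := svc.PutBucketReplication(&s3.PutBucketReplicationInput{
            Bucket: aws.String("source-bucket"), // placeholder
            ReplicationConfiguration: &s3.ReplicationConfiguration{
                Role: aws.String("arn:aws:iam::123456789012:role/replication-role"),
                Rules: []*s3.ReplicationRule{{
                    Status:   aws.String(s3.ReplicationRuleStatusEnabled),
                    Priority: aws.Int64(1),
                    Filter:   &s3.ReplicationRuleFilter{Prefix: aws.String("logs/")},
                    DeleteMarkerReplication: &s3.DeleteMarkerReplication{
                        Status: aws.String(s3.DeleteMarkerReplicationStatusDisabled),
                    },
                    SourceSelectionCriteria: &s3.SourceSelectionCriteria{
                        ReplicaModifications: &s3.ReplicaModifications{
                            Status: aws.String(s3.ReplicaModificationsStatusEnabled),
                        },
                    },
                    Destination: &s3.Destination{
                        Bucket: aws.String("arn:aws:s3:::destination-bucket"),
                    },
                }},
            },
        })
        if err != nil {
            log.Fatal(err)
        }
    }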
type ReplicationConfiguration struct { @@ -31363,16 +31532,11 @@ type ReplicationRule struct { // Deprecated: Prefix has been deprecated Prefix *string `deprecated:"true" type:"string"` - // The priority associated with the rule. If you specify multiple rules in a - // replication configuration, Amazon S3 prioritizes the rules to prevent conflicts - // when filtering. If two or more rules identify the same object based on a - // specified filter, the rule with higher priority takes precedence. For example: - // - // * Same object quality prefix-based filter criteria if prefixes you specified - // in multiple rules overlap - // - // * Same object qualify tag-based filter criteria specified in multiple - // rules + // The priority indicates which rule has precedence whenever two or more replication + // rules conflict. Amazon S3 will attempt to replicate objects according to + // all replication rules. However, if there are two or more rules with the same + // destination bucket, then objects will be replicated according to the rule + // with the highest priority. The higher the number, the higher the priority. // // For more information, see Replication (https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html) // in the Amazon Simple Storage Service Developer Guide. @@ -32997,6 +33161,15 @@ type ServerSideEncryptionRule struct { // bucket. If a PUT Object request doesn't specify any server-side encryption, // this default encryption will be applied. ApplyServerSideEncryptionByDefault *ServerSideEncryptionByDefault `type:"structure"` + + // Specifies whether Amazon S3 should use an S3 Bucket Key with server-side + // encryption using KMS (SSE-KMS) for new objects in the bucket. Existing objects + // are not affected. Setting the BucketKeyEnabled element to true causes Amazon + // S3 to use an S3 Bucket Key. By default, S3 Bucket Key is not enabled. + // + // For more information, see Amazon S3 Bucket Keys (https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html) + // in the Amazon Simple Storage Service Developer Guide. + BucketKeyEnabled *bool `type:"boolean"` } // String returns the string representation @@ -33030,6 +33203,12 @@ func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *Serv return s } +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *ServerSideEncryptionRule) SetBucketKeyEnabled(v bool) *ServerSideEncryptionRule { + s.BucketKeyEnabled = &v + return s +} + // A container that describes additional filters for identifying the source // objects that you want to replicate. You can choose to enable or disable the // replication of these objects. Currently, Amazon S3 supports only the filter @@ -33038,6 +33217,17 @@ func (s *ServerSideEncryptionRule) SetApplyServerSideEncryptionByDefault(v *Serv type SourceSelectionCriteria struct { _ struct{} `type:"structure"` + // A filter that you can specify for selections for modifications on replicas. + // Amazon S3 doesn't replicate replica modifications by default. In the latest + // version of replication configuration (when Filter is specified), you can + // specify this element and set the status to Enabled to replicate modifications + // on replicas. + // + // If you don't specify the Filter element, Amazon S3 assumes that the replication + // configuration is the earlier version, V1. 
In the earlier version, this element + // is not allowed + ReplicaModifications *ReplicaModifications `type:"structure"` + // A container for filter information for the selection of Amazon S3 objects // encrypted with AWS KMS. If you include SourceSelectionCriteria in the replication // configuration, this element is required. @@ -33057,6 +33247,11 @@ func (s SourceSelectionCriteria) GoString() string { // Validate inspects the fields of the type to determine if they are valid. func (s *SourceSelectionCriteria) Validate() error { invalidParams := request.ErrInvalidParams{Context: "SourceSelectionCriteria"} + if s.ReplicaModifications != nil { + if err := s.ReplicaModifications.Validate(); err != nil { + invalidParams.AddNested("ReplicaModifications", err.(request.ErrInvalidParams)) + } + } if s.SseKmsEncryptedObjects != nil { if err := s.SseKmsEncryptedObjects.Validate(); err != nil { invalidParams.AddNested("SseKmsEncryptedObjects", err.(request.ErrInvalidParams)) @@ -33069,6 +33264,12 @@ func (s *SourceSelectionCriteria) Validate() error { return nil } +// SetReplicaModifications sets the ReplicaModifications field's value. +func (s *SourceSelectionCriteria) SetReplicaModifications(v *ReplicaModifications) *SourceSelectionCriteria { + s.ReplicaModifications = v + return s +} + // SetSseKmsEncryptedObjects sets the SseKmsEncryptedObjects field's value. func (s *SourceSelectionCriteria) SetSseKmsEncryptedObjects(v *SseKmsEncryptedObjects) *SourceSelectionCriteria { s.SseKmsEncryptedObjects = v @@ -33478,9 +33679,11 @@ type Tiering struct { // AccessTier is a required field AccessTier *string `type:"string" required:"true" enum:"IntelligentTieringAccessTier"` - // The number of days that you want your archived data to be accessible. The - // minimum number of days specified in the restore request must be at least - // 90 days. If a smaller value is specifed it will be ignored. + // The number of consecutive days of no access after which an object will be + // eligible to be transitioned to the corresponding tier. The minimum number + // of days specified for Archive Access tier must be at least 90 days and Deep + // Archive Access tier must be at least 180 days. The maximum can be up to 2 + // years (730 days). // // Days is a required field Days *int64 `type:"integer" required:"true"` @@ -34055,6 +34258,10 @@ func (s UploadPartCopyInput) updateArnableField(v string) (interface{}, error) { type UploadPartCopyOutput struct { _ struct{} `type:"structure" payload:"CopyPartResult"` + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + // Container for all response elements. CopyPartResult *CopyPartResult `type:"structure"` @@ -34096,6 +34303,12 @@ func (s UploadPartCopyOutput) GoString() string { return s.String() } +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *UploadPartCopyOutput) SetBucketKeyEnabled(v bool) *UploadPartCopyOutput { + s.BucketKeyEnabled = &v + return s +} + // SetCopyPartResult sets the CopyPartResult field's value. 
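A sketch of how the new ReplicaModifications element slots into SourceSelectionCriteria, using only the fields and enum values added in this diff; the helper below is illustrative, and Validate rejects a criteria whose required Status is unset:

	import (
		"fmt"

		"github.com/aws/aws-sdk-go/aws"
		"github.com/aws/aws-sdk-go/service/s3"
	)

	func buildCriteria() (*s3.SourceSelectionCriteria, error) {
		// Enable replication of replica modifications; Status is required.
		criteria := &s3.SourceSelectionCriteria{
			ReplicaModifications: &s3.ReplicaModifications{
				Status: aws.String(s3.ReplicaModificationsStatusEnabled),
			},
		}
		if err := criteria.Validate(); err != nil {
			return nil, fmt.Errorf("invalid source selection criteria: %w", err)
		}
		return criteria, nil
	}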
func (s *UploadPartCopyOutput) SetCopyPartResult(v *CopyPartResult) *UploadPartCopyOutput { s.CopyPartResult = v @@ -34373,6 +34586,10 @@ func (s UploadPartInput) updateArnableField(v string) (interface{}, error) { type UploadPartOutput struct { _ struct{} `type:"structure"` + // Indicates whether the multipart upload uses an S3 Bucket Key for server-side + // encryption with AWS KMS (SSE-KMS). + BucketKeyEnabled *bool `location:"header" locationName:"x-amz-server-side-encryption-bucket-key-enabled" type:"boolean"` + // Entity tag for the uploaded object. ETag *string `location:"header" locationName:"ETag" type:"string"` @@ -34409,6 +34626,12 @@ func (s UploadPartOutput) GoString() string { return s.String() } +// SetBucketKeyEnabled sets the BucketKeyEnabled field's value. +func (s *UploadPartOutput) SetBucketKeyEnabled(v bool) *UploadPartOutput { + s.BucketKeyEnabled = &v + return s +} + // SetETag sets the ETag field's value. func (s *UploadPartOutput) SetETag(v string) *UploadPartOutput { s.ETag = &v @@ -35469,6 +35692,22 @@ func QuoteFields_Values() []string { } } +const ( + // ReplicaModificationsStatusEnabled is a ReplicaModificationsStatus enum value + ReplicaModificationsStatusEnabled = "Enabled" + + // ReplicaModificationsStatusDisabled is a ReplicaModificationsStatus enum value + ReplicaModificationsStatusDisabled = "Disabled" +) + +// ReplicaModificationsStatus_Values returns all elements of the ReplicaModificationsStatus enum +func ReplicaModificationsStatus_Values() []string { + return []string{ + ReplicaModificationsStatusEnabled, + ReplicaModificationsStatusDisabled, + } +} + const ( // ReplicationRuleStatusEnabled is a ReplicationRuleStatus enum value ReplicationRuleStatusEnabled = "Enabled" diff --git a/vendor/github.com/blang/semver/.travis.yml b/vendor/github.com/blang/semver/.travis.yml deleted file mode 100644 index 102fb9a691b..00000000000 --- a/vendor/github.com/blang/semver/.travis.yml +++ /dev/null @@ -1,21 +0,0 @@ -language: go -matrix: - include: - - go: 1.4.3 - - go: 1.5.4 - - go: 1.6.3 - - go: 1.7 - - go: tip - allow_failures: - - go: tip -install: -- go get golang.org/x/tools/cmd/cover -- go get github.com/mattn/goveralls -script: -- echo "Test and track coverage" ; $HOME/gopath/bin/goveralls -package "." -service=travis-ci - -repotoken $COVERALLS_TOKEN -- echo "Build examples" ; cd examples && go build -- echo "Check if gofmt'd" ; diff -u <(echo -n) <(gofmt -d -s .) -env: - global: - secure: HroGEAUQpVq9zX1b1VIkraLiywhGbzvNnTZq2TMxgK7JHP8xqNplAeF1izrR2i4QLL9nsY+9WtYss4QuPvEtZcVHUobw6XnL6radF7jS1LgfYZ9Y7oF+zogZ2I5QUMRLGA7rcxQ05s7mKq3XZQfeqaNts4bms/eZRefWuaFZbkw= diff --git a/vendor/github.com/blang/semver/LICENSE b/vendor/github.com/blang/semver/LICENSE deleted file mode 100644 index 5ba5c86fcb0..00000000000 --- a/vendor/github.com/blang/semver/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License - -Copyright (c) 2014 Benedikt Lang - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - diff --git a/vendor/github.com/blang/semver/README.md b/vendor/github.com/blang/semver/README.md deleted file mode 100644 index 08b2e4a3d76..00000000000 --- a/vendor/github.com/blang/semver/README.md +++ /dev/null @@ -1,194 +0,0 @@ -semver for golang [![Build Status](https://travis-ci.org/blang/semver.svg?branch=master)](https://travis-ci.org/blang/semver) [![GoDoc](https://godoc.org/github.com/blang/semver?status.png)](https://godoc.org/github.com/blang/semver) [![Coverage Status](https://img.shields.io/coveralls/blang/semver.svg)](https://coveralls.io/r/blang/semver?branch=master) -====== - -semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`. - -Usage ------ -```bash -$ go get github.com/blang/semver -``` -Note: Always vendor your dependencies or fix on a specific version tag. - -```go -import github.com/blang/semver -v1, err := semver.Make("1.0.0-beta") -v2, err := semver.Make("2.0.0-beta") -v1.Compare(v2) -``` - -Also check the [GoDocs](http://godoc.org/github.com/blang/semver). - -Why should I use this lib? ------ - -- Fully spec compatible -- No reflection -- No regex -- Fully tested (Coverage >99%) -- Readable parsing/validation errors -- Fast (See [Benchmarks](#benchmarks)) -- Only Stdlib -- Uses values instead of pointers -- Many features, see below - - -Features ------ - -- Parsing and validation at all levels -- Comparator-like comparisons -- Compare Helper Methods -- InPlace manipulation -- Ranges `>=1.0.0 <2.0.0 || >=3.0.0 !3.0.1-beta.1` -- Wildcards `>=1.x`, `<=2.5.x` -- Sortable (implements sort.Interface) -- database/sql compatible (sql.Scanner/Valuer) -- encoding/json compatible (json.Marshaler/Unmarshaler) - -Ranges ------- - -A `Range` is a set of conditions which specify which versions satisfy the range. - -A condition is composed of an operator and a version. The supported operators are: - -- `<1.0.0` Less than `1.0.0` -- `<=1.0.0` Less than or equal to `1.0.0` -- `>1.0.0` Greater than `1.0.0` -- `>=1.0.0` Greater than or equal to `1.0.0` -- `1.0.0`, `=1.0.0`, `==1.0.0` Equal to `1.0.0` -- `!1.0.0`, `!=1.0.0` Not equal to `1.0.0`. Excludes version `1.0.0`. - -Note that spaces between the operator and the version will be gracefully tolerated. - -A `Range` can link multiple `Ranges` separated by space: - -Ranges can be linked by logical AND: - - - `>1.0.0 <2.0.0` would match between both ranges, so `1.1.1` and `1.8.7` but not `1.0.0` or `2.0.0` - - `>1.0.0 <3.0.0 !2.0.3-beta.2` would match every version between `1.0.0` and `3.0.0` except `2.0.3-beta.2` - -Ranges can also be linked by logical OR: - - - `<2.0.0 || >=3.0.0` would match `1.x.x` and `3.x.x` but not `2.x.x` - -AND has a higher precedence than OR. It's not possible to use brackets. 
- -Ranges can be combined by both AND and OR - - - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` - -Range usage: - -``` -v, err := semver.Parse("1.2.3") -range, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0") -if range(v) { - //valid -} - -``` - -Example ------ - -Have a look at full examples in [examples/main.go](examples/main.go) - -```go -import github.com/blang/semver - -v, err := semver.Make("0.0.1-alpha.preview+123.github") -fmt.Printf("Major: %d\n", v.Major) -fmt.Printf("Minor: %d\n", v.Minor) -fmt.Printf("Patch: %d\n", v.Patch) -fmt.Printf("Pre: %s\n", v.Pre) -fmt.Printf("Build: %s\n", v.Build) - -// Prerelease versions array -if len(v.Pre) > 0 { - fmt.Println("Prerelease versions:") - for i, pre := range v.Pre { - fmt.Printf("%d: %q\n", i, pre) - } -} - -// Build meta data array -if len(v.Build) > 0 { - fmt.Println("Build meta data:") - for i, build := range v.Build { - fmt.Printf("%d: %q\n", i, build) - } -} - -v001, err := semver.Make("0.0.1") -// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE -v001.GT(v) == true -v.LT(v001) == true -v.GTE(v) == true -v.LTE(v) == true - -// Or use v.Compare(v2) for comparisons (-1, 0, 1): -v001.Compare(v) == 1 -v.Compare(v001) == -1 -v.Compare(v) == 0 - -// Manipulate Version in place: -v.Pre[0], err = semver.NewPRVersion("beta") -if err != nil { - fmt.Printf("Error parsing pre release version: %q", err) -} - -fmt.Println("\nValidate versions:") -v.Build[0] = "?" - -err = v.Validate() -if err != nil { - fmt.Printf("Validation failed: %s\n", err) -} -``` - - -Benchmarks ------ - - BenchmarkParseSimple-4 5000000 390 ns/op 48 B/op 1 allocs/op - BenchmarkParseComplex-4 1000000 1813 ns/op 256 B/op 7 allocs/op - BenchmarkParseAverage-4 1000000 1171 ns/op 163 B/op 4 allocs/op - BenchmarkStringSimple-4 20000000 119 ns/op 16 B/op 1 allocs/op - BenchmarkStringLarger-4 10000000 206 ns/op 32 B/op 2 allocs/op - BenchmarkStringComplex-4 5000000 324 ns/op 80 B/op 3 allocs/op - BenchmarkStringAverage-4 5000000 273 ns/op 53 B/op 2 allocs/op - BenchmarkValidateSimple-4 200000000 9.33 ns/op 0 B/op 0 allocs/op - BenchmarkValidateComplex-4 3000000 469 ns/op 0 B/op 0 allocs/op - BenchmarkValidateAverage-4 5000000 256 ns/op 0 B/op 0 allocs/op - BenchmarkCompareSimple-4 100000000 11.8 ns/op 0 B/op 0 allocs/op - BenchmarkCompareComplex-4 50000000 30.8 ns/op 0 B/op 0 allocs/op - BenchmarkCompareAverage-4 30000000 41.5 ns/op 0 B/op 0 allocs/op - BenchmarkSort-4 3000000 419 ns/op 256 B/op 2 allocs/op - BenchmarkRangeParseSimple-4 2000000 850 ns/op 192 B/op 5 allocs/op - BenchmarkRangeParseAverage-4 1000000 1677 ns/op 400 B/op 10 allocs/op - BenchmarkRangeParseComplex-4 300000 5214 ns/op 1440 B/op 30 allocs/op - BenchmarkRangeMatchSimple-4 50000000 25.6 ns/op 0 B/op 0 allocs/op - BenchmarkRangeMatchAverage-4 30000000 56.4 ns/op 0 B/op 0 allocs/op - BenchmarkRangeMatchComplex-4 10000000 153 ns/op 0 B/op 0 allocs/op - -See benchmark cases at [semver_test.go](semver_test.go) - - -Motivation ------ - -I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like. - - -Contribution ------ - -Feel free to make a pull request. For bigger changes create a issue first to discuss about it. - - -License ------ - -See [LICENSE](LICENSE) file. 
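For the record, the usage snippet in the README removed above would not have compiled as written: the import path is unquoted and range is a reserved word in Go. A compiling equivalent of the documented API, per the removed package's own signatures, is:

	import (
		"fmt"

		"github.com/blang/semver"
	)

	func checkRange() {
		v := semver.MustParse("1.2.3")
		// A Range is a func(Version) bool; AND binds tighter than OR.
		r, err := semver.ParseRange(">1.0.0 <2.0.0 || >=3.0.0")
		if err != nil {
			panic(err)
		}
		fmt.Println(r(v)) // true: 1.2.3 satisfies >1.0.0 <2.0.0
	}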
diff --git a/vendor/github.com/blang/semver/json.go b/vendor/github.com/blang/semver/json.go deleted file mode 100644 index a74bf7c4494..00000000000 --- a/vendor/github.com/blang/semver/json.go +++ /dev/null @@ -1,23 +0,0 @@ -package semver - -import ( - "encoding/json" -) - -// MarshalJSON implements the encoding/json.Marshaler interface. -func (v Version) MarshalJSON() ([]byte, error) { - return json.Marshal(v.String()) -} - -// UnmarshalJSON implements the encoding/json.Unmarshaler interface. -func (v *Version) UnmarshalJSON(data []byte) (err error) { - var versionString string - - if err = json.Unmarshal(data, &versionString); err != nil { - return - } - - *v, err = Parse(versionString) - - return -} diff --git a/vendor/github.com/blang/semver/package.json b/vendor/github.com/blang/semver/package.json deleted file mode 100644 index 1cf8ebdd9c1..00000000000 --- a/vendor/github.com/blang/semver/package.json +++ /dev/null @@ -1,17 +0,0 @@ -{ - "author": "blang", - "bugs": { - "URL": "https://github.com/blang/semver/issues", - "url": "https://github.com/blang/semver/issues" - }, - "gx": { - "dvcsimport": "github.com/blang/semver" - }, - "gxVersion": "0.10.0", - "language": "go", - "license": "MIT", - "name": "semver", - "releaseCmd": "git commit -a -m \"gx publish $VERSION\"", - "version": "3.5.1" -} - diff --git a/vendor/github.com/blang/semver/range.go b/vendor/github.com/blang/semver/range.go deleted file mode 100644 index fca406d4793..00000000000 --- a/vendor/github.com/blang/semver/range.go +++ /dev/null @@ -1,416 +0,0 @@ -package semver - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -type wildcardType int - -const ( - noneWildcard wildcardType = iota - majorWildcard wildcardType = 1 - minorWildcard wildcardType = 2 - patchWildcard wildcardType = 3 -) - -func wildcardTypefromInt(i int) wildcardType { - switch i { - case 1: - return majorWildcard - case 2: - return minorWildcard - case 3: - return patchWildcard - default: - return noneWildcard - } -} - -type comparator func(Version, Version) bool - -var ( - compEQ comparator = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 0 - } - compNE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) != 0 - } - compGT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == 1 - } - compGE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) >= 0 - } - compLT = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) == -1 - } - compLE = func(v1 Version, v2 Version) bool { - return v1.Compare(v2) <= 0 - } -) - -type versionRange struct { - v Version - c comparator -} - -// rangeFunc creates a Range from the given versionRange. -func (vr *versionRange) rangeFunc() Range { - return Range(func(v Version) bool { - return vr.c(v, vr.v) - }) -} - -// Range represents a range of versions. -// A Range can be used to check if a Version satisfies it: -// -// range, err := semver.ParseRange(">1.0.0 <2.0.0") -// range(semver.MustParse("1.1.1") // returns true -type Range func(Version) bool - -// OR combines the existing Range with another Range using logical OR. -func (rf Range) OR(f Range) Range { - return Range(func(v Version) bool { - return rf(v) || f(v) - }) -} - -// AND combines the existing Range with another Range using logical AND. -func (rf Range) AND(f Range) Range { - return Range(func(v Version) bool { - return rf(v) && f(v) - }) -} - -// ParseRange parses a range and returns a Range. -// If the range could not be parsed an error is returned. 
-// -// Valid ranges are: -// - "<1.0.0" -// - "<=1.0.0" -// - ">1.0.0" -// - ">=1.0.0" -// - "1.0.0", "=1.0.0", "==1.0.0" -// - "!1.0.0", "!=1.0.0" -// -// A Range can consist of multiple ranges separated by space: -// Ranges can be linked by logical AND: -// - ">1.0.0 <2.0.0" would match between both ranges, so "1.1.1" and "1.8.7" but not "1.0.0" or "2.0.0" -// - ">1.0.0 <3.0.0 !2.0.3-beta.2" would match every version between 1.0.0 and 3.0.0 except 2.0.3-beta.2 -// -// Ranges can also be linked by logical OR: -// - "<2.0.0 || >=3.0.0" would match "1.x.x" and "3.x.x" but not "2.x.x" -// -// AND has a higher precedence than OR. It's not possible to use brackets. -// -// Ranges can be combined by both AND and OR -// -// - `>1.0.0 <2.0.0 || >3.0.0 !4.2.1` would match `1.2.3`, `1.9.9`, `3.1.1`, but not `4.2.1`, `2.1.1` -func ParseRange(s string) (Range, error) { - parts := splitAndTrim(s) - orParts, err := splitORParts(parts) - if err != nil { - return nil, err - } - expandedParts, err := expandWildcardVersion(orParts) - if err != nil { - return nil, err - } - var orFn Range - for _, p := range expandedParts { - var andFn Range - for _, ap := range p { - opStr, vStr, err := splitComparatorVersion(ap) - if err != nil { - return nil, err - } - vr, err := buildVersionRange(opStr, vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse Range %q: %s", ap, err) - } - rf := vr.rangeFunc() - - // Set function - if andFn == nil { - andFn = rf - } else { // Combine with existing function - andFn = andFn.AND(rf) - } - } - if orFn == nil { - orFn = andFn - } else { - orFn = orFn.OR(andFn) - } - - } - return orFn, nil -} - -// splitORParts splits the already cleaned parts by '||'. -// Checks for invalid positions of the operator and returns an -// error if found. -func splitORParts(parts []string) ([][]string, error) { - var ORparts [][]string - last := 0 - for i, p := range parts { - if p == "||" { - if i == 0 { - return nil, fmt.Errorf("First element in range is '||'") - } - ORparts = append(ORparts, parts[last:i]) - last = i + 1 - } - } - if last == len(parts) { - return nil, fmt.Errorf("Last element in range is '||'") - } - ORparts = append(ORparts, parts[last:]) - return ORparts, nil -} - -// buildVersionRange takes a slice of 2: operator and version -// and builds a versionRange, otherwise an error. 
-func buildVersionRange(opStr, vStr string) (*versionRange, error) { - c := parseComparator(opStr) - if c == nil { - return nil, fmt.Errorf("Could not parse comparator %q in %q", opStr, strings.Join([]string{opStr, vStr}, "")) - } - v, err := Parse(vStr) - if err != nil { - return nil, fmt.Errorf("Could not parse version %q in %q: %s", vStr, strings.Join([]string{opStr, vStr}, ""), err) - } - - return &versionRange{ - v: v, - c: c, - }, nil - -} - -// inArray checks if a byte is contained in an array of bytes -func inArray(s byte, list []byte) bool { - for _, el := range list { - if el == s { - return true - } - } - return false -} - -// splitAndTrim splits a range string by spaces and cleans whitespaces -func splitAndTrim(s string) (result []string) { - last := 0 - var lastChar byte - excludeFromSplit := []byte{'>', '<', '='} - for i := 0; i < len(s); i++ { - if s[i] == ' ' && !inArray(lastChar, excludeFromSplit) { - if last < i-1 { - result = append(result, s[last:i]) - } - last = i + 1 - } else if s[i] != ' ' { - lastChar = s[i] - } - } - if last < len(s)-1 { - result = append(result, s[last:]) - } - - for i, v := range result { - result[i] = strings.Replace(v, " ", "", -1) - } - - // parts := strings.Split(s, " ") - // for _, x := range parts { - // if s := strings.TrimSpace(x); len(s) != 0 { - // result = append(result, s) - // } - // } - return -} - -// splitComparatorVersion splits the comparator from the version. -// Input must be free of leading or trailing spaces. -func splitComparatorVersion(s string) (string, string, error) { - i := strings.IndexFunc(s, unicode.IsDigit) - if i == -1 { - return "", "", fmt.Errorf("Could not get version from string: %q", s) - } - return strings.TrimSpace(s[0:i]), s[i:], nil -} - -// getWildcardType will return the type of wildcard that the -// passed version contains -func getWildcardType(vStr string) wildcardType { - parts := strings.Split(vStr, ".") - nparts := len(parts) - wildcard := parts[nparts-1] - - possibleWildcardType := wildcardTypefromInt(nparts) - if wildcard == "x" { - return possibleWildcardType - } - - return noneWildcard -} - -// createVersionFromWildcard will convert a wildcard version -// into a regular version, replacing 'x's with '0's, handling -// special cases like '1.x.x' and '1.x' -func createVersionFromWildcard(vStr string) string { - // handle 1.x.x - vStr2 := strings.Replace(vStr, ".x.x", ".x", 1) - vStr2 = strings.Replace(vStr2, ".x", ".0", 1) - parts := strings.Split(vStr2, ".") - - // handle 1.x - if len(parts) == 2 { - return vStr2 + ".0" - } - - return vStr2 -} - -// incrementMajorVersion will increment the major version -// of the passed version -func incrementMajorVersion(vStr string) (string, error) { - parts := strings.Split(vStr, ".") - i, err := strconv.Atoi(parts[0]) - if err != nil { - return "", err - } - parts[0] = strconv.Itoa(i + 1) - - return strings.Join(parts, "."), nil -} - -// incrementMajorVersion will increment the minor version -// of the passed version -func incrementMinorVersion(vStr string) (string, error) { - parts := strings.Split(vStr, ".") - i, err := strconv.Atoi(parts[1]) - if err != nil { - return "", err - } - parts[1] = strconv.Itoa(i + 1) - - return strings.Join(parts, "."), nil -} - -// expandWildcardVersion will expand wildcards inside versions -// following these rules: -// -// * when dealing with patch wildcards: -// >= 1.2.x will become >= 1.2.0 -// <= 1.2.x will become < 1.3.0 -// > 1.2.x will become >= 1.3.0 -// < 1.2.x will become < 1.2.0 -// != 1.2.x will become < 1.2.0 >= 
1.3.0 -// -// * when dealing with minor wildcards: -// >= 1.x will become >= 1.0.0 -// <= 1.x will become < 2.0.0 -// > 1.x will become >= 2.0.0 -// < 1.0 will become < 1.0.0 -// != 1.x will become < 1.0.0 >= 2.0.0 -// -// * when dealing with wildcards without -// version operator: -// 1.2.x will become >= 1.2.0 < 1.3.0 -// 1.x will become >= 1.0.0 < 2.0.0 -func expandWildcardVersion(parts [][]string) ([][]string, error) { - var expandedParts [][]string - for _, p := range parts { - var newParts []string - for _, ap := range p { - if strings.Index(ap, "x") != -1 { - opStr, vStr, err := splitComparatorVersion(ap) - if err != nil { - return nil, err - } - - versionWildcardType := getWildcardType(vStr) - flatVersion := createVersionFromWildcard(vStr) - - var resultOperator string - var shouldIncrementVersion bool - switch opStr { - case ">": - resultOperator = ">=" - shouldIncrementVersion = true - case ">=": - resultOperator = ">=" - case "<": - resultOperator = "<" - case "<=": - resultOperator = "<" - shouldIncrementVersion = true - case "", "=", "==": - newParts = append(newParts, ">="+flatVersion) - resultOperator = "<" - shouldIncrementVersion = true - case "!=", "!": - newParts = append(newParts, "<"+flatVersion) - resultOperator = ">=" - shouldIncrementVersion = true - } - - var resultVersion string - if shouldIncrementVersion { - switch versionWildcardType { - case patchWildcard: - resultVersion, _ = incrementMinorVersion(flatVersion) - case minorWildcard: - resultVersion, _ = incrementMajorVersion(flatVersion) - } - } else { - resultVersion = flatVersion - } - - ap = resultOperator + resultVersion - } - newParts = append(newParts, ap) - } - expandedParts = append(expandedParts, newParts) - } - - return expandedParts, nil -} - -func parseComparator(s string) comparator { - switch s { - case "==": - fallthrough - case "": - fallthrough - case "=": - return compEQ - case ">": - return compGT - case ">=": - return compGE - case "<": - return compLT - case "<=": - return compLE - case "!": - fallthrough - case "!=": - return compNE - } - - return nil -} - -// MustParseRange is like ParseRange but panics if the range cannot be parsed. -func MustParseRange(s string) Range { - r, err := ParseRange(s) - if err != nil { - panic(`semver: ParseRange(` + s + `): ` + err.Error()) - } - return r -} diff --git a/vendor/github.com/blang/semver/semver.go b/vendor/github.com/blang/semver/semver.go deleted file mode 100644 index 8ee0842e6ac..00000000000 --- a/vendor/github.com/blang/semver/semver.go +++ /dev/null @@ -1,418 +0,0 @@ -package semver - -import ( - "errors" - "fmt" - "strconv" - "strings" -) - -const ( - numbers string = "0123456789" - alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" - alphanum = alphas + numbers -) - -// SpecVersion is the latest fully supported spec version of semver -var SpecVersion = Version{ - Major: 2, - Minor: 0, - Patch: 0, -} - -// Version represents a semver compatible version -type Version struct { - Major uint64 - Minor uint64 - Patch uint64 - Pre []PRVersion - Build []string //No Precendence -} - -// Version to string -func (v Version) String() string { - b := make([]byte, 0, 5) - b = strconv.AppendUint(b, v.Major, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Minor, 10) - b = append(b, '.') - b = strconv.AppendUint(b, v.Patch, 10) - - if len(v.Pre) > 0 { - b = append(b, '-') - b = append(b, v.Pre[0].String()...) - - for _, pre := range v.Pre[1:] { - b = append(b, '.') - b = append(b, pre.String()...) 
- } - } - - if len(v.Build) > 0 { - b = append(b, '+') - b = append(b, v.Build[0]...) - - for _, build := range v.Build[1:] { - b = append(b, '.') - b = append(b, build...) - } - } - - return string(b) -} - -// Equals checks if v is equal to o. -func (v Version) Equals(o Version) bool { - return (v.Compare(o) == 0) -} - -// EQ checks if v is equal to o. -func (v Version) EQ(o Version) bool { - return (v.Compare(o) == 0) -} - -// NE checks if v is not equal to o. -func (v Version) NE(o Version) bool { - return (v.Compare(o) != 0) -} - -// GT checks if v is greater than o. -func (v Version) GT(o Version) bool { - return (v.Compare(o) == 1) -} - -// GTE checks if v is greater than or equal to o. -func (v Version) GTE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// GE checks if v is greater than or equal to o. -func (v Version) GE(o Version) bool { - return (v.Compare(o) >= 0) -} - -// LT checks if v is less than o. -func (v Version) LT(o Version) bool { - return (v.Compare(o) == -1) -} - -// LTE checks if v is less than or equal to o. -func (v Version) LTE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// LE checks if v is less than or equal to o. -func (v Version) LE(o Version) bool { - return (v.Compare(o) <= 0) -} - -// Compare compares Versions v to o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v Version) Compare(o Version) int { - if v.Major != o.Major { - if v.Major > o.Major { - return 1 - } - return -1 - } - if v.Minor != o.Minor { - if v.Minor > o.Minor { - return 1 - } - return -1 - } - if v.Patch != o.Patch { - if v.Patch > o.Patch { - return 1 - } - return -1 - } - - // Quick comparison if a version has no prerelease versions - if len(v.Pre) == 0 && len(o.Pre) == 0 { - return 0 - } else if len(v.Pre) == 0 && len(o.Pre) > 0 { - return 1 - } else if len(v.Pre) > 0 && len(o.Pre) == 0 { - return -1 - } - - i := 0 - for ; i < len(v.Pre) && i < len(o.Pre); i++ { - if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 { - continue - } else if comp == 1 { - return 1 - } else { - return -1 - } - } - - // If all pr versions are the equal but one has further prversion, this one greater - if i == len(v.Pre) && i == len(o.Pre) { - return 0 - } else if i == len(v.Pre) && i < len(o.Pre) { - return -1 - } else { - return 1 - } - -} - -// Validate validates v and returns error in case -func (v Version) Validate() error { - // Major, Minor, Patch already validated using uint64 - - for _, pre := range v.Pre { - if !pre.IsNum { //Numeric prerelease versions already uint64 - if len(pre.VersionStr) == 0 { - return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr) - } - if !containsOnly(pre.VersionStr, alphanum) { - return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr) - } - } - } - - for _, build := range v.Build { - if len(build) == 0 { - return fmt.Errorf("Build meta data can not be empty %q", build) - } - if !containsOnly(build, alphanum) { - return fmt.Errorf("Invalid character(s) found in build meta data %q", build) - } - } - - return nil -} - -// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error -func New(s string) (vp *Version, err error) { - v, err := Parse(s) - vp = &v - return -} - -// Make is an alias for Parse, parses version string and returns a validated Version or error -func Make(s string) (Version, error) { - return Parse(s) -} - -// ParseTolerant allows for certain version specifications that do not strictly adhere to semver -// 
specs to be parsed by this library. It does so by normalizing versions before passing them to -// Parse(). It currently trims spaces, removes a "v" prefix, and adds a 0 patch number to versions -// with only major and minor components specified -func ParseTolerant(s string) (Version, error) { - s = strings.TrimSpace(s) - s = strings.TrimPrefix(s, "v") - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) < 3 { - if strings.ContainsAny(parts[len(parts)-1], "+-") { - return Version{}, errors.New("Short version cannot contain PreRelease/Build meta data") - } - for len(parts) < 3 { - parts = append(parts, "0") - } - s = strings.Join(parts, ".") - } - - return Parse(s) -} - -// Parse parses version string and returns a validated Version or error -func Parse(s string) (Version, error) { - if len(s) == 0 { - return Version{}, errors.New("Version string empty") - } - - // Split into major.minor.(patch+pr+meta) - parts := strings.SplitN(s, ".", 3) - if len(parts) != 3 { - return Version{}, errors.New("No Major.Minor.Patch elements found") - } - - // Major - if !containsOnly(parts[0], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0]) - } - if hasLeadingZeroes(parts[0]) { - return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0]) - } - major, err := strconv.ParseUint(parts[0], 10, 64) - if err != nil { - return Version{}, err - } - - // Minor - if !containsOnly(parts[1], numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1]) - } - if hasLeadingZeroes(parts[1]) { - return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1]) - } - minor, err := strconv.ParseUint(parts[1], 10, 64) - if err != nil { - return Version{}, err - } - - v := Version{} - v.Major = major - v.Minor = minor - - var build, prerelease []string - patchStr := parts[2] - - if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 { - build = strings.Split(patchStr[buildIndex+1:], ".") - patchStr = patchStr[:buildIndex] - } - - if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 { - prerelease = strings.Split(patchStr[preIndex+1:], ".") - patchStr = patchStr[:preIndex] - } - - if !containsOnly(patchStr, numbers) { - return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr) - } - if hasLeadingZeroes(patchStr) { - return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr) - } - patch, err := strconv.ParseUint(patchStr, 10, 64) - if err != nil { - return Version{}, err - } - - v.Patch = patch - - // Prerelease - for _, prstr := range prerelease { - parsedPR, err := NewPRVersion(prstr) - if err != nil { - return Version{}, err - } - v.Pre = append(v.Pre, parsedPR) - } - - // Build meta data - for _, str := range build { - if len(str) == 0 { - return Version{}, errors.New("Build meta data is empty") - } - if !containsOnly(str, alphanum) { - return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str) - } - v.Build = append(v.Build, str) - } - - return v, nil -} - -// MustParse is like Parse but panics if the version cannot be parsed. 
-func MustParse(s string) Version { - v, err := Parse(s) - if err != nil { - panic(`semver: Parse(` + s + `): ` + err.Error()) - } - return v -} - -// PRVersion represents a PreRelease Version -type PRVersion struct { - VersionStr string - VersionNum uint64 - IsNum bool -} - -// NewPRVersion creates a new valid prerelease version -func NewPRVersion(s string) (PRVersion, error) { - if len(s) == 0 { - return PRVersion{}, errors.New("Prerelease is empty") - } - v := PRVersion{} - if containsOnly(s, numbers) { - if hasLeadingZeroes(s) { - return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s) - } - num, err := strconv.ParseUint(s, 10, 64) - - // Might never be hit, but just in case - if err != nil { - return PRVersion{}, err - } - v.VersionNum = num - v.IsNum = true - } else if containsOnly(s, alphanum) { - v.VersionStr = s - v.IsNum = false - } else { - return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s) - } - return v, nil -} - -// IsNumeric checks if prerelease-version is numeric -func (v PRVersion) IsNumeric() bool { - return v.IsNum -} - -// Compare compares two PreRelease Versions v and o: -// -1 == v is less than o -// 0 == v is equal to o -// 1 == v is greater than o -func (v PRVersion) Compare(o PRVersion) int { - if v.IsNum && !o.IsNum { - return -1 - } else if !v.IsNum && o.IsNum { - return 1 - } else if v.IsNum && o.IsNum { - if v.VersionNum == o.VersionNum { - return 0 - } else if v.VersionNum > o.VersionNum { - return 1 - } else { - return -1 - } - } else { // both are Alphas - if v.VersionStr == o.VersionStr { - return 0 - } else if v.VersionStr > o.VersionStr { - return 1 - } else { - return -1 - } - } -} - -// PreRelease version to string -func (v PRVersion) String() string { - if v.IsNum { - return strconv.FormatUint(v.VersionNum, 10) - } - return v.VersionStr -} - -func containsOnly(s string, set string) bool { - return strings.IndexFunc(s, func(r rune) bool { - return !strings.ContainsRune(set, r) - }) == -1 -} - -func hasLeadingZeroes(s string) bool { - return len(s) > 1 && s[0] == '0' -} - -// NewBuildVersion creates a new valid build version -func NewBuildVersion(s string) (string, error) { - if len(s) == 0 { - return "", errors.New("Buildversion is empty") - } - if !containsOnly(s, alphanum) { - return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s) - } - return s, nil -} diff --git a/vendor/github.com/blang/semver/sort.go b/vendor/github.com/blang/semver/sort.go deleted file mode 100644 index e18f880826a..00000000000 --- a/vendor/github.com/blang/semver/sort.go +++ /dev/null @@ -1,28 +0,0 @@ -package semver - -import ( - "sort" -) - -// Versions represents multiple versions. 
-type Versions []Version - -// Len returns length of version collection -func (s Versions) Len() int { - return len(s) -} - -// Swap swaps two versions inside the collection by its indices -func (s Versions) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -// Less checks if version at index i is less than version at index j -func (s Versions) Less(i, j int) bool { - return s[i].LT(s[j]) -} - -// Sort sorts a slice of versions -func Sort(versions []Version) { - sort.Sort(Versions(versions)) -} diff --git a/vendor/github.com/blang/semver/sql.go b/vendor/github.com/blang/semver/sql.go deleted file mode 100644 index eb4d802666e..00000000000 --- a/vendor/github.com/blang/semver/sql.go +++ /dev/null @@ -1,30 +0,0 @@ -package semver - -import ( - "database/sql/driver" - "fmt" -) - -// Scan implements the database/sql.Scanner interface. -func (v *Version) Scan(src interface{}) (err error) { - var str string - switch src := src.(type) { - case string: - str = src - case []byte: - str = string(src) - default: - return fmt.Errorf("Version.Scan: cannot convert %T to string.", src) - } - - if t, err := Parse(str); err == nil { - *v = t - } - - return -} - -// Value implements the database/sql/driver.Valuer interface. -func (v Version) Value() (driver.Value, error) { - return v.String(), nil -} diff --git a/vendor/github.com/cortexproject/cortex/integration/e2e/db/db.go b/vendor/github.com/cortexproject/cortex/integration/e2e/db/db.go index fe87b312515..fadd4b49a72 100644 --- a/vendor/github.com/cortexproject/cortex/integration/e2e/db/db.go +++ b/vendor/github.com/cortexproject/cortex/integration/e2e/db/db.go @@ -3,6 +3,7 @@ package e2edb import ( "fmt" "net/url" + "strings" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" @@ -19,11 +20,17 @@ const ( // NewMinio returns minio server, used as a local replacement for S3. 
func NewMinio(port int, bktName string) *e2e.HTTPService { + minioKESGithubContent := "https://raw.githubusercontent.com/minio/kes/master" + commands := []string{ + "curl -sSL --tlsv1.2 -O '%s/root.key' -O '%s/root.cert'", + "mkdir -p /data/%s && minio server --address :%v --quiet /data", + } + m := e2e.NewHTTPService( fmt.Sprintf("minio-%v", port), images.Minio, // Create the "cortex" bucket before starting minio - e2e.NewCommandWithoutEntrypoint("sh", "-c", fmt.Sprintf("mkdir -p /data/%s && minio server --address :%v --quiet /data", bktName, port)), + e2e.NewCommandWithoutEntrypoint("sh", "-c", fmt.Sprintf(strings.Join(commands, " && "), minioKESGithubContent, minioKESGithubContent, bktName, port)), e2e.NewHTTPReadinessProbe(port, "/minio/health/ready", 200, 200), port, ) @@ -32,6 +39,11 @@ func NewMinio(port int, bktName string) *e2e.HTTPService { "MINIO_SECRET_KEY": MinioSecretKey, "MINIO_BROWSER": "off", "ENABLE_HTTPS": "0", + // https://docs.min.io/docs/minio-kms-quickstart-guide.html + "MINIO_KMS_KES_ENDPOINT": "https://play.min.io:7373", + "MINIO_KMS_KES_KEY_FILE": "root.key", + "MINIO_KMS_KES_CERT_FILE": "root.cert", + "MINIO_KMS_KES_KEY_NAME": "my-minio-key", }) return m } diff --git a/vendor/github.com/cortexproject/cortex/integration/e2e/images/images.go b/vendor/github.com/cortexproject/cortex/integration/e2e/images/images.go index 19cfd5afb71..88ed766e21e 100644 --- a/vendor/github.com/cortexproject/cortex/integration/e2e/images/images.go +++ b/vendor/github.com/cortexproject/cortex/integration/e2e/images/images.go @@ -1,7 +1,7 @@ package images // If you change the image tag, remember to update it in the preloading done -// by CircleCI (see .circleci/config.yml) and GitHub actions (see .github/workflows/*). +// by GitHub actions (see .github/workflows/*). // These are variables so that they can be modified. diff --git a/vendor/github.com/cortexproject/cortex/integration/e2e/metrics.go b/vendor/github.com/cortexproject/cortex/integration/e2e/metrics.go index cdfa799e70b..18378fb447b 100644 --- a/vendor/github.com/cortexproject/cortex/integration/e2e/metrics.go +++ b/vendor/github.com/cortexproject/cortex/integration/e2e/metrics.go @@ -107,6 +107,16 @@ func Greater(value float64) func(sums ...float64) bool { } } +// GreaterOrEqual is an isExpected function for WaitSumMetrics that returns true if given single sum is greater or equal than given value. +func GreaterOrEqual(value float64) func(sums ...float64) bool { + return func(sums ...float64) bool { + if len(sums) != 1 { + panic("greater: expected one value") + } + return sums[0] >= value + } +} + // Less is an isExpected function for WaitSumMetrics that returns true if given single sum is less than given value. func Less(value float64) func(sums ...float64) bool { return func(sums ...float64) bool { diff --git a/vendor/github.com/cortexproject/cortex/integration/e2e/service.go b/vendor/github.com/cortexproject/cortex/integration/e2e/service.go index ffb8f211481..76d1dce90d8 100644 --- a/vendor/github.com/cortexproject/cortex/integration/e2e/service.go +++ b/vendor/github.com/cortexproject/cortex/integration/e2e/service.go @@ -220,7 +220,7 @@ func (s *ConcreteService) NetworkEndpoint(port int) string { // // This method return correct endpoint for the service in any state. 
func (s *ConcreteService) NetworkEndpointFor(networkName string, port int) string { - return fmt.Sprintf("%s:%d", containerName(networkName, s.name), port) + return fmt.Sprintf("%s:%d", NetworkContainerHost(networkName, s.name), port) } func (s *ConcreteService) SetReadinessProbe(probe ReadinessProbe) { @@ -240,12 +240,8 @@ func (s *ConcreteService) Ready() error { return s.readiness.Ready(s) } -func containerName(netName string, name string) string { - return fmt.Sprintf("%s-%s", netName, name) -} - func (s *ConcreteService) containerName() string { - return containerName(s.usedNetworkName, s.name) + return NetworkContainerHost(s.usedNetworkName, s.name) } func (s *ConcreteService) WaitForRunning() (err error) { @@ -355,6 +351,17 @@ func (s *ConcreteService) Exec(command *Command) (string, string, error) { return stdout.String(), stderr.String(), err } +// NetworkContainerHost return the hostname of the container within the network. This is +// the address a container should use to connect to other containers. +func NetworkContainerHost(networkName, containerName string) string { + return fmt.Sprintf("%s-%s", networkName, containerName) +} + +// NetworkContainerHostPort return the host:port address of a container within the network. +func NetworkContainerHostPort(networkName, containerName string, port int) string { + return fmt.Sprintf("%s-%s:%d", networkName, containerName, port) +} + type Command struct { cmd string args []string diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go index ceed4e7ba19..a93379bc7bb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager.go @@ -7,12 +7,13 @@ import ( "fmt" "net/http" "net/url" + "path" "path/filepath" + "strings" "sync" "time" "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/prometheus/alertmanager/api" "github.com/prometheus/alertmanager/cluster" "github.com/prometheus/alertmanager/config" @@ -78,9 +79,6 @@ type Alertmanager struct { // Further, in upstream AM, this metric is handled using the config coordinator which we don't use // hence we need to generate the metric ourselves. configHashMetric prometheus.Gauge - - activeMtx sync.Mutex - active bool } var ( @@ -100,11 +98,9 @@ func init() { // New creates a new Alertmanager. func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) { am := &Alertmanager{ - cfg: cfg, - logger: log.With(cfg.Logger, "user", cfg.UserID), - stop: make(chan struct{}), - active: false, - activeMtx: sync.Mutex{}, + cfg: cfg, + logger: log.With(cfg.Logger, "user", cfg.UserID), + stop: make(chan struct{}), configHashMetric: promauto.With(reg).NewGauge(prometheus.GaugeOpts{ Name: "alertmanager_config_hash", Help: "Hash of the currently loaded alertmanager configuration.", @@ -181,6 +177,17 @@ func New(cfg *Config, reg *prometheus.Registry) (*Alertmanager, error) { ui.Register(router, webReload, log.With(am.logger, "component", "ui")) am.mux = am.api.Register(router, am.cfg.ExternalURL.Path) + // Override some extra paths registered in the router (eg. /metrics which by default exposes prometheus.DefaultRegisterer). + // Entire router is registered in Mux to "/" path, so there is no conflict with overwriting specific paths. 
+ for _, p := range []string{"/metrics", "/-/reload", "/debug/"} { + a := path.Join(am.cfg.ExternalURL.Path, p) + // Preserve end slash, as for Mux it means entire subtree. + if strings.HasSuffix(p, "/") { + a = a + "/" + } + am.mux.Handle(a, http.NotFoundHandler()) + } + am.dispatcherMetrics = dispatch.NewDispatcherMetrics(am.registry) return am, nil } @@ -256,55 +263,10 @@ func (am *Alertmanager) ApplyConfig(userID string, conf *config.Config, rawCfg s go am.dispatcher.Run() go am.inhibitor.Run() - // Ensure the alertmanager is set to active - am.activeMtx.Lock() - am.active = true - am.activeMtx.Unlock() - am.configHashMetric.Set(md5HashAsMetricValue([]byte(rawCfg))) return nil } -// IsActive returns if the alertmanager is currently running -// or is paused -func (am *Alertmanager) IsActive() bool { - am.activeMtx.Lock() - defer am.activeMtx.Unlock() - return am.active -} - -// Pause running jobs in the alertmanager that are able to be restarted and sets -// to inactives -func (am *Alertmanager) Pause() { - // Set to inactive - am.activeMtx.Lock() - am.active = false - am.activeMtx.Unlock() - - // Stop the inhibitor and dispatcher which will be recreated when - // a new config is applied - if am.inhibitor != nil { - am.inhibitor.Stop() - am.inhibitor = nil - } - if am.dispatcher != nil { - am.dispatcher.Stop() - am.dispatcher = nil - } - - // Remove all of the active silences from the alertmanager - silences, _, err := am.silences.Query() - if err != nil { - level.Warn(am.logger).Log("msg", "unable to retrieve silences for removal", "err", err) - } - for _, si := range silences { - err = am.silences.Expire(si.Id) - if err != nil { - level.Warn(am.logger).Log("msg", "unable to remove silence", "err", err, "silence", si.Id) - } - } -} - // Stop stops the Alertmanager. func (am *Alertmanager) Stop() { if am.inhibitor != nil { @@ -317,6 +279,10 @@ func (am *Alertmanager) Stop() { am.alerts.Close() close(am.stop) +} + +func (am *Alertmanager) StopAndWait() { + am.Stop() am.wg.Wait() } diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go new file mode 100644 index 00000000000..0efeebde15a --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_http.go @@ -0,0 +1,53 @@ +package alertmanager + +import ( + "net/http" + "text/template" + + "github.com/go-kit/kit/log/level" + + util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/services" +) + +var ( + statusPageTemplate = template.Must(template.New("main").Parse(` + + + + + Cortex Alertmanager Ring + + +

+		<h1>Cortex Alertmanager Ring</h1>
+		<p>{{ .Message }}</p>
+ + `)) +) + +func writeMessage(w http.ResponseWriter, message string) { + w.WriteHeader(http.StatusOK) + err := statusPageTemplate.Execute(w, struct { + Message string + }{Message: message}) + + if err != nil { + level.Error(util_log.Logger).Log("msg", "unable to serve alertmanager ring page", "err", err) + } +} + +func (am *MultitenantAlertmanager) RingHandler(w http.ResponseWriter, req *http.Request) { + if !am.cfg.ShardingEnabled { + writeMessage(w, "Alertmanager has no ring because sharding is disabled.") + return + } + + if am.State() != services.Running { + // we cannot read the ring before the alertmanager is in Running state, + // because that would lead to race condition. + writeMessage(w, "Alertmanager is not running yet.") + return + } + + am.ring.ServeHTTP(w, req) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go index d500bcafde5..2d2a3de7a16 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_metrics.go @@ -154,6 +154,12 @@ func (m *alertmanagerMetrics) addUserRegistry(user string, reg *prometheus.Regis m.regs.AddUserRegistry(user, reg) } +func (m *alertmanagerMetrics) removeUserRegistry(user string) { + // We neeed to go for a soft deletion here, as hard deletion requires + // that _all_ metrics except gauges are per-user. + m.regs.RemoveUserRegistry(user, false) +} + func (m *alertmanagerMetrics) Describe(out chan<- *prometheus.Desc) { out <- m.alertsReceived out <- m.alertsInvalid diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go new file mode 100644 index 00000000000..9fe2d9dea1f --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alertmanager_ring.go @@ -0,0 +1,114 @@ +package alertmanager + +import ( + "flag" + "fmt" + "os" + "time" + + "github.com/go-kit/kit/log/level" + + "github.com/cortexproject/cortex/pkg/ring" + "github.com/cortexproject/cortex/pkg/ring/kv" + "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" +) + +const ( + // RingKey is the key under which we store the alertmanager ring in the KVStore. + RingKey = "alertmanager" + + // RingNameForServer is the name of the ring used by the alertmanager server. + RingNameForServer = "alertmanager" + + // RingNumTokens is a safe default instead of exposing to config option to the user + // in order to simplify the config. + RingNumTokens = 128 +) + +// RingOp is the operation used for distributing tenants between alertmanagers. +var RingOp = ring.NewOp([]ring.IngesterState{ring.ACTIVE}, func(s ring.IngesterState) bool { + // Only ACTIVE Alertmanager get requests. If instance is not ACTIVE, we need to find another Alertmanager. + return s != ring.ACTIVE +}) + +// RingConfig masks the ring lifecycler config which contains +// many options not really required by the alertmanager ring. This config +// is used to strip down the config to the minimum, and avoid confusion +// to the user. 
+type RingConfig struct { + KVStore kv.Config `yaml:"kvstore" doc:"description=The key-value store used to share the hash ring across multiple instances."` + HeartbeatPeriod time.Duration `yaml:"heartbeat_period"` + HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` + ReplicationFactor int `yaml:"replication_factor"` + + // Instance details + InstanceID string `yaml:"instance_id" doc:"hidden"` + InstanceInterfaceNames []string `yaml:"instance_interface_names"` + InstancePort int `yaml:"instance_port" doc:"hidden"` + InstanceAddr string `yaml:"instance_addr" doc:"hidden"` + + // Injected internally + ListenPort int `yaml:"-"` + RingCheckPeriod time.Duration `yaml:"-"` + + // Used for testing + SkipUnregister bool `yaml:"-"` +} + +// RegisterFlags adds the flags required to config this to the given FlagSet +func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { + hostname, err := os.Hostname() + if err != nil { + level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err) + os.Exit(1) + } + + // Prefix used by all the ring flags + rfprefix := "alertmanager.sharding-ring." + + // Ring flags + cfg.KVStore.RegisterFlagsWithPrefix(rfprefix, "alertmanagers/", f) + f.DurationVar(&cfg.HeartbeatPeriod, rfprefix+"heartbeat-period", 15*time.Second, "Period at which to heartbeat to the ring.") + f.DurationVar(&cfg.HeartbeatTimeout, rfprefix+"heartbeat-timeout", time.Minute, "The heartbeat timeout after which alertmanagers are considered unhealthy within the ring.") + f.IntVar(&cfg.ReplicationFactor, rfprefix+"replication-factor", 3, "The replication factor to use when sharding the alertmanager.") + + // Instance flags + cfg.InstanceInterfaceNames = []string{"eth0", "en0"} + f.Var((*flagext.StringSlice)(&cfg.InstanceInterfaceNames), rfprefix+"instance-interface-names", "Name of network interface to read address from.") + f.StringVar(&cfg.InstanceAddr, rfprefix+"instance-addr", "", "IP address to advertise in the ring.") + f.IntVar(&cfg.InstancePort, rfprefix+"instance-port", 0, "Port to advertise in the ring (defaults to server.http-listen-port).") + f.StringVar(&cfg.InstanceID, rfprefix+"instance-id", hostname, "Instance ID to register in the ring.") + + cfg.RingCheckPeriod = 5 * time.Second +} + +// ToLifecyclerConfig returns a LifecyclerConfig based on the alertmanager +// ring config. 
+func (cfg *RingConfig) ToLifecyclerConfig() (ring.BasicLifecyclerConfig, error) { + instanceAddr, err := ring.GetInstanceAddr(cfg.InstanceAddr, cfg.InstanceInterfaceNames) + if err != nil { + return ring.BasicLifecyclerConfig{}, err + } + + instancePort := ring.GetInstancePort(cfg.InstancePort, cfg.ListenPort) + + return ring.BasicLifecyclerConfig{ + ID: cfg.InstanceID, + Addr: fmt.Sprintf("%s:%d", instanceAddr, instancePort), + HeartbeatPeriod: cfg.HeartbeatPeriod, + TokensObservePeriod: 0, + NumTokens: RingNumTokens, + }, nil +} + +func (cfg *RingConfig) ToRingConfig() ring.Config { + rc := ring.Config{} + flagext.DefaultValues(&rc) + + rc.KVStore = cfg.KVStore + rc.HeartbeatTimeout = cfg.HeartbeatTimeout + rc.ReplicationFactor = cfg.ReplicationFactor + + return rc +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient/store.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient/store.go index 02c1d4d733d..8b00eb79aa2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient/store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient/store.go @@ -10,7 +10,7 @@ import ( "github.com/cortexproject/cortex/pkg/alertmanager/alerts" "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // Object Alert Storage Schema @@ -60,7 +60,7 @@ func (a *AlertStore) getAlertConfig(ctx context.Context, key string) (alerts.Ale return alerts.AlertConfigDesc{}, err } - defer runutil.CloseWithLogOnErr(util.Logger, readCloser, "close alert config reader") + defer runutil.CloseWithLogOnErr(util_log.Logger, readCloser, "close alert config reader") buf, err := ioutil.ReadAll(readCloser) if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go index d2d3f09fa65..ae1fa09d01b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/api.go @@ -9,7 +9,7 @@ import ( "github.com/cortexproject/cortex/pkg/alertmanager/alerts" "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" @@ -34,7 +34,7 @@ type UserConfig struct { } func (am *MultitenantAlertmanager) GetUserConfig(w http.ResponseWriter, r *http.Request) { - logger := util.WithContext(r.Context(), am.logger) + logger := util_log.WithContext(r.Context(), am.logger) userID, err := tenant.TenantID(r.Context()) if err != nil { @@ -72,7 +72,7 @@ func (am *MultitenantAlertmanager) GetUserConfig(w http.ResponseWriter, r *http. } func (am *MultitenantAlertmanager) SetUserConfig(w http.ResponseWriter, r *http.Request) { - logger := util.WithContext(r.Context(), am.logger) + logger := util_log.WithContext(r.Context(), am.logger) userID, err := tenant.TenantID(r.Context()) if err != nil { level.Error(logger).Log("msg", errNoOrgID, "err", err.Error()) @@ -113,7 +113,7 @@ func (am *MultitenantAlertmanager) SetUserConfig(w http.ResponseWriter, r *http. 
}

 func (am *MultitenantAlertmanager) DeleteUserConfig(w http.ResponseWriter, r *http.Request) {
- logger := util.WithContext(r.Context(), am.logger)
+ logger := util_log.WithContext(r.Context(), am.logger)
 userID, err := tenant.TenantID(r.Context())
 if err != nil {
 level.Error(logger).Log("msg", errNoOrgID, "err", err.Error())
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go
new file mode 100644
index 00000000000..b80a508b8d7
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/lifecycle.go
@@ -0,0 +1,28 @@
+package alertmanager
+
+import (
+ "github.com/cortexproject/cortex/pkg/ring"
+)
+
+func (r *MultitenantAlertmanager) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.IngesterState, ring.Tokens) {
+ // When we initialize the alertmanager instance in the ring we want to start from
+ // a clean situation, so regardless of the current state we set it to JOINING, while
+ // keeping any existing tokens.
+ var tokens []uint32
+ if instanceExists {
+ tokens = instanceDesc.GetTokens()
+ }
+
+ _, takenTokens := ringDesc.TokensFor(instanceID)
+ newTokens := ring.GenerateTokens(RingNumTokens-len(tokens), takenTokens)
+
+ // Tokens sorting will be enforced by the parent caller.
+ tokens = append(tokens, newTokens...)
+
+ return ring.JOINING, tokens
+}
+
+func (r *MultitenantAlertmanager) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {}
+func (r *MultitenantAlertmanager) OnRingInstanceStopping(_ *ring.BasicLifecycler) {}
+func (r *MultitenantAlertmanager) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) {
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go
index a203295f4e8..d74e5edfcc5 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/multitenant.go
@@ -4,6 +4,7 @@ import (
 "context"
 "flag"
 "fmt"
+ "hash/fnv"
 "html/template"
 "io/ioutil"
 "net/http"
@@ -22,23 +23,29 @@ import (
 "github.com/prometheus/client_golang/prometheus/promauto"

 "github.com/cortexproject/cortex/pkg/alertmanager/alerts"
+ "github.com/cortexproject/cortex/pkg/ring"
+ "github.com/cortexproject/cortex/pkg/ring/kv"
 "github.com/cortexproject/cortex/pkg/tenant"
 "github.com/cortexproject/cortex/pkg/util"
 "github.com/cortexproject/cortex/pkg/util/flagext"
+ util_log "github.com/cortexproject/cortex/pkg/util/log"
 "github.com/cortexproject/cortex/pkg/util/services"
 )

-var backoffConfig = util.BackoffConfig{
- // Backoff for loading initial configuration set.
- MinBackoff: 100 * time.Millisecond,
- MaxBackoff: 2 * time.Second,
-}
-
 const (
 // If a config sets the webhook URL to this, it will be rewritten to
 // a URL derived from Config.AutoWebhookRoot
 autoWebhookURL = "http://internal.monitor"
+
+ // Reasons for (re)syncing alertmanager configurations from object storage.
+ reasonPeriodic = "periodic"
+ reasonInitial = "initial"
+ reasonRingChange = "ring-change"
+
+ // ringAutoForgetUnhealthyPeriods is the number of consecutive timeout periods after which
+ // an unhealthy instance in the ring is automatically removed.
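// For illustration only, a self-contained sketch (not the cortex ring API) of the
// token top-up performed by OnRingInstanceRegister above: existing tokens are kept
// and only the shortfall is generated, avoiding tokens already taken ring-wide.
// The token count 128 is an assumption standing in for RingNumTokens.
package main

import (
	"fmt"
	"math/rand"
	"sort"
)

func generateTokens(n int, taken []uint32) []uint32 {
	used := make(map[uint32]bool, len(taken))
	for _, t := range taken {
		used[t] = true
	}
	tokens := make([]uint32, 0, n)
	for len(tokens) < n {
		if t := rand.Uint32(); !used[t] {
			used[t] = true
			tokens = append(tokens, t)
		}
	}
	return tokens
}

func main() {
	existing := []uint32{10, 20, 30}  // tokens recovered from a previous registration
	taken := []uint32{10, 20, 30, 40} // tokens already owned by instances in the ring
	tokens := append(existing, generateTokens(128-len(existing), taken)...)
	// Sorting is enforced by the caller in the real code.
	sort.Slice(tokens, func(i, j int) bool { return tokens[i] < tokens[j] })
	fmt.Println("registering with", len(tokens), "tokens")
}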
+ ringAutoForgetUnhealthyPeriods = 5
+
 statusPage = `
@@ -89,20 +96,37 @@ type MultitenantAlertmanagerConfig struct {
 ExternalURL flagext.URLValue `yaml:"external_url"`
 PollInterval time.Duration `yaml:"poll_interval"`

- ClusterBindAddr string `yaml:"cluster_bind_address"`
- ClusterAdvertiseAddr string `yaml:"cluster_advertise_address"`
- Peers flagext.StringSlice `yaml:"peers"`
- PeerTimeout time.Duration `yaml:"peer_timeout"`
+ DeprecatedClusterBindAddr string `yaml:"cluster_bind_address"`
+ DeprecatedClusterAdvertiseAddr string `yaml:"cluster_advertise_address"`
+ DeprecatedPeers flagext.StringSlice `yaml:"peers"`
+ DeprecatedPeerTimeout time.Duration `yaml:"peer_timeout"`
+
+ // Enable sharding for the Alertmanager
+ ShardingEnabled bool `yaml:"sharding_enabled"`
+ ShardingRing RingConfig `yaml:"sharding_ring"`

 FallbackConfigFile string `yaml:"fallback_config_file"`
 AutoWebhookRoot string `yaml:"auto_webhook_root"`
- Store AlertStoreConfig `yaml:"storage"`
+ Store AlertStoreConfig `yaml:"storage"`
+ Cluster ClusterConfig `yaml:"cluster"`

 EnableAPI bool `yaml:"enable_api"`
 }

-const defaultClusterAddr = "0.0.0.0:9094"
+type ClusterConfig struct {
+ ListenAddr string `yaml:"listen_address"`
+ AdvertiseAddr string `yaml:"advertise_address"`
+ Peers flagext.StringSliceCSV `yaml:"peers"`
+ PeerTimeout time.Duration `yaml:"peer_timeout"`
+ GossipInterval time.Duration `yaml:"gossip_interval"`
+ PushPullInterval time.Duration `yaml:"push_pull_interval"`
+}
+
+const (
+ defaultClusterAddr = "0.0.0.0:9094"
+ defaultPeerTimeout = 15 * time.Second
+)

 // RegisterFlags adds the flags required to configure this to the given FlagSet.
 func (cfg *MultitenantAlertmanagerConfig) RegisterFlags(f *flag.FlagSet) {
@@ -115,14 +139,57 @@ func (cfg *MultitenantAlertmanagerConfig) RegisterFlags(f *flag.FlagSet) {
 f.StringVar(&cfg.AutoWebhookRoot, "alertmanager.configs.auto-webhook-root", "", "Root of URL to generate if config is "+autoWebhookURL)
 f.DurationVar(&cfg.PollInterval, "alertmanager.configs.poll-interval", 15*time.Second, "How frequently to poll Cortex configs")

- f.StringVar(&cfg.ClusterBindAddr, "cluster.listen-address", defaultClusterAddr, "Listen address for cluster.")
- f.StringVar(&cfg.ClusterAdvertiseAddr, "cluster.advertise-address", "", "Explicit address to advertise in cluster.")
- f.Var(&cfg.Peers, "cluster.peer", "Initial peers (may be repeated).")
- f.DurationVar(&cfg.PeerTimeout, "cluster.peer-timeout", time.Second*15, "Time to wait between peers to send notifications.")
+ // Flags prefixed with `cluster` are deprecated in favor of their `alertmanager` prefix equivalent.
+ // TODO: New flags introduced in Cortex 1.7, remove old ones in Cortex 1.9
+ f.StringVar(&cfg.DeprecatedClusterBindAddr, "cluster.listen-address", defaultClusterAddr, "Deprecated. Use -alertmanager.cluster.listen-address instead.")
+ f.StringVar(&cfg.DeprecatedClusterAdvertiseAddr, "cluster.advertise-address", "", "Deprecated. Use -alertmanager.cluster.advertise-address instead.")
+ f.Var(&cfg.DeprecatedPeers, "cluster.peer", "Deprecated. Use -alertmanager.cluster.peers instead.")
+ f.DurationVar(&cfg.DeprecatedPeerTimeout, "cluster.peer-timeout", time.Second*15, "Deprecated.
Use -alertmanager.cluster.peer-timeout instead.")

 f.BoolVar(&cfg.EnableAPI, "experimental.alertmanager.enable-api", false, "Enable the experimental alertmanager config api.")

+ f.BoolVar(&cfg.ShardingEnabled, "alertmanager.sharding-enabled", false, "Shard tenants across multiple alertmanager instances.")
+
+ cfg.ShardingRing.RegisterFlags(f)

 cfg.Store.RegisterFlags(f)
+ cfg.Cluster.RegisterFlags(f)
+}
+
+func (cfg *ClusterConfig) RegisterFlags(f *flag.FlagSet) {
+ prefix := "alertmanager.cluster."
+ f.StringVar(&cfg.ListenAddr, prefix+"listen-address", defaultClusterAddr, "Listen address and port for the cluster. Not specifying this flag disables high-availability mode.")
+ f.StringVar(&cfg.AdvertiseAddr, prefix+"advertise-address", "", "Explicit address or hostname to advertise in cluster.")
+ f.Var(&cfg.Peers, prefix+"peers", "Comma-separated list of initial peers.")
+ f.DurationVar(&cfg.PeerTimeout, prefix+"peer-timeout", defaultPeerTimeout, "Time to wait between peers to send notifications.")
+ f.DurationVar(&cfg.GossipInterval, prefix+"gossip-interval", cluster.DefaultGossipInterval, "The interval between sending gossip messages. By lowering this value (more frequent) gossip messages are propagated across the cluster more quickly at the expense of increased bandwidth usage.")
+ f.DurationVar(&cfg.PushPullInterval, prefix+"push-pull-interval", cluster.DefaultPushPullInterval, "The interval between gossip state syncs. Setting this interval lower (more frequent) will increase convergence speeds across larger clusters at the expense of increased bandwidth usage.")
+}
+
+// SupportDeprecatedFlagset ensures we support the previous set of cluster flags that are now deprecated.
+func (cfg *ClusterConfig) SupportDeprecatedFlagset(amCfg *MultitenantAlertmanagerConfig, logger log.Logger) {
+ if amCfg.DeprecatedClusterBindAddr != defaultClusterAddr {
+ flagext.DeprecatedFlagsUsed.Inc()
+ level.Warn(logger).Log("msg", "running with DEPRECATED flag -cluster.listen-address, use -alertmanager.cluster.listen-address instead.")
+ cfg.ListenAddr = amCfg.DeprecatedClusterBindAddr
+ }
+
+ if amCfg.DeprecatedClusterAdvertiseAddr != "" {
+ flagext.DeprecatedFlagsUsed.Inc()
+ level.Warn(logger).Log("msg", "running with DEPRECATED flag -cluster.advertise-address, use -alertmanager.cluster.advertise-address instead.")
+ cfg.AdvertiseAddr = amCfg.DeprecatedClusterAdvertiseAddr
+ }
+
+ if len(amCfg.DeprecatedPeers) > 0 {
+ flagext.DeprecatedFlagsUsed.Inc()
+ level.Warn(logger).Log("msg", "running with DEPRECATED flag -cluster.peer, use -alertmanager.cluster.peers instead.")
+ cfg.Peers = []string(amCfg.DeprecatedPeers)
+ }
+
+ if amCfg.DeprecatedPeerTimeout != defaultPeerTimeout {
+ flagext.DeprecatedFlagsUsed.Inc()
+ level.Warn(logger).Log("msg", "running with DEPRECATED flag -cluster.peer-timeout, use -alertmanager.cluster.peer-timeout instead.")
+ cfg.PeerTimeout = amCfg.DeprecatedPeerTimeout
+ }
 }

 // Validate config and returns error on failure
@@ -163,6 +230,14 @@ type MultitenantAlertmanager struct {
 cfg *MultitenantAlertmanagerConfig

+ // Ring used for sharding alertmanager instances.
+ ringLifecycler *ring.BasicLifecycler
+ ring *ring.Ring
+
+ // Subservices manager (ring, lifecycler)
+ subservices *services.Manager
+ subservicesWatcher *services.FailureWatcher
+
 store AlertStore

 // The fallback config is stored as a string and parsed every time it's needed
@@ -170,17 +245,24 @@ type MultitenantAlertmanager struct {
 // effect here.
 fallbackConfig string

- // All the organization configurations that we have.
Only used for instrumentation. - cfgs map[string]alerts.AlertConfigDesc - alertmanagersMtx sync.Mutex alertmanagers map[string]*Alertmanager + // Stores the current set of configurations we're running in each tenant's Alertmanager. + // Used for comparing configurations as we synchronize them. + cfgs map[string]alerts.AlertConfigDesc logger log.Logger alertmanagerMetrics *alertmanagerMetrics multitenantMetrics *multitenantAlertmanagerMetrics peer *cluster.Peer + + registry prometheus.Registerer + ringCheckErrors prometheus.Counter + tenantsOwned prometheus.Gauge + tenantsDiscovered prometheus.Gauge + syncTotal *prometheus.CounterVec + syncFailures *prometheus.CounterVec } // NewMultitenantAlertmanager creates a new MultitenantAlertmanager. @@ -206,17 +288,19 @@ func NewMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, logger log.L } } + cfg.Cluster.SupportDeprecatedFlagset(cfg, logger) + var peer *cluster.Peer - if cfg.ClusterBindAddr != "" { + if cfg.Cluster.ListenAddr != "" { peer, err = cluster.Create( log.With(logger, "component", "cluster"), registerer, - cfg.ClusterBindAddr, - cfg.ClusterAdvertiseAddr, - cfg.Peers, + cfg.Cluster.ListenAddr, + cfg.Cluster.AdvertiseAddr, + cfg.Cluster.Peers, true, - cluster.DefaultPushPullInterval, - cluster.DefaultGossipInterval, + cfg.Cluster.PushPullInterval, + cfg.Cluster.GossipInterval, cluster.DefaultTcpTimeout, cluster.DefaultProbeTimeout, cluster.DefaultProbeInterval, @@ -226,7 +310,7 @@ func NewMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, logger log.L } err = peer.Join(cluster.DefaultReconnectInterval, cluster.DefaultReconnectTimeout) if err != nil { - level.Warn(logger).Log("msg", "unable to join gossip mesh", "err", err) + level.Warn(logger).Log("msg", "unable to join gossip mesh while initializing cluster for high availability mode", "err", err) } go peer.Settle(context.Background(), cluster.DefaultGossipInterval) } @@ -236,10 +320,22 @@ func NewMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, logger log.L return nil, err } - return createMultitenantAlertmanager(cfg, fallbackConfig, peer, store, logger, registerer), nil + var ringStore kv.Client + if cfg.ShardingEnabled { + ringStore, err = kv.NewClient( + cfg.ShardingRing.KVStore, + ring.GetCodec(), + kv.RegistererWithKVName(registerer, "alertmanager"), + ) + if err != nil { + return nil, errors.Wrap(err, "create KV store client") + } + } + + return createMultitenantAlertmanager(cfg, fallbackConfig, peer, store, ringStore, logger, registerer) } -func createMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, fallbackConfig []byte, peer *cluster.Peer, store AlertStore, logger log.Logger, registerer prometheus.Registerer) *MultitenantAlertmanager { +func createMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, fallbackConfig []byte, peer *cluster.Peer, store AlertStore, ringStore kv.Client, logger log.Logger, registerer prometheus.Registerer) (*MultitenantAlertmanager, error) { am := &MultitenantAlertmanager{ cfg: cfg, fallbackConfig: string(fallbackConfig), @@ -250,29 +346,176 @@ func createMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, fallbackC peer: peer, store: store, logger: log.With(logger, "component", "MultiTenantAlertmanager"), + registry: registerer, + ringCheckErrors: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ + Name: "cortex_alertmanager_ring_check_errors_total", + Help: "Number of errors that have occurred when checking the ring for ownership.", + }), + syncTotal: 
promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{
+ Name: "cortex_alertmanager_sync_configs_total",
+ Help: "Total number of times the alertmanager sync operation was triggered.",
+ }, []string{"reason"}),
+ syncFailures: promauto.With(registerer).NewCounterVec(prometheus.CounterOpts{
+ Name: "cortex_alertmanager_sync_configs_failed_total",
+ Help: "Total number of times the alertmanager sync operation failed.",
+ }, []string{"reason"}),
+ tenantsDiscovered: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{
+ Name: "cortex_alertmanager_tenants_discovered",
+ Help: "Number of tenants with an Alertmanager configuration discovered.",
+ }),
+ tenantsOwned: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{
+ Name: "cortex_alertmanager_tenants_owned",
+ Help: "Current number of tenants owned by the Alertmanager instance.",
+ }),
+ }
+
+ // Initialize the top-level metrics.
+ for _, r := range []string{reasonInitial, reasonPeriodic, reasonRingChange} {
+ am.syncTotal.WithLabelValues(r)
+ am.syncFailures.WithLabelValues(r)
+ }
+
+ if cfg.ShardingEnabled {
+ lifecyclerCfg, err := am.cfg.ShardingRing.ToLifecyclerConfig()
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to initialize Alertmanager's lifecycler config")
+ }
+
+ // Define lifecycler delegates in reverse order (last to be called defined first because they're
+ // chained via "next delegate").
+ delegate := ring.BasicLifecyclerDelegate(am)
+ delegate = ring.NewLeaveOnStoppingDelegate(delegate, am.logger)
+ delegate = ring.NewAutoForgetDelegate(am.cfg.ShardingRing.HeartbeatTimeout*ringAutoForgetUnhealthyPeriods, delegate, am.logger)
+
+ am.ringLifecycler, err = ring.NewBasicLifecycler(lifecyclerCfg, RingNameForServer, RingKey, ringStore, delegate, am.logger, am.registry)
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to initialize Alertmanager's lifecycler")
+ }
+
+ am.ring, err = ring.NewWithStoreClientAndStrategy(am.cfg.ShardingRing.ToRingConfig(), RingNameForServer, RingKey, ringStore, ring.NewIgnoreUnhealthyInstancesReplicationStrategy())
+ if err != nil {
+ return nil, errors.Wrap(err, "failed to initialize Alertmanager's ring")
+ }
+
+ if am.registry != nil {
+ am.registry.MustRegister(am.ring)
+ }
 }

 if registerer != nil {
 registerer.MustRegister(am.alertmanagerMetrics)
 }

- am.Service = services.NewTimerService(am.cfg.PollInterval, am.starting, am.iteration, am.stopping)
- return am
+ am.Service = services.NewBasicService(am.starting, am.run, am.stopping)
+
+ return am, nil
 }

-func (am *MultitenantAlertmanager) starting(ctx context.Context) error {
- // Load initial set of all configurations before polling for new ones.
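// An illustrative sketch of the "reverse order" delegate chaining noted above,
// with stand-in types rather than the ring package's API: each wrapper holds the
// next delegate, so the last wrapper added is the first one invoked and the base
// delegate runs last.
package main

import "fmt"

type delegate interface {
	onRegister()
}

type baseDelegate struct{}

func (baseDelegate) onRegister() { fmt.Println("base: pick state and tokens") }

type wrapper struct {
	name string
	next delegate
}

func (w wrapper) onRegister() {
	fmt.Println(w.name, "runs, then hands off")
	w.next.onRegister()
}

func main() {
	var d delegate = baseDelegate{} // defined first, invoked last
	d = wrapper{name: "leave-on-stopping", next: d}
	d = wrapper{name: "auto-forget", next: d} // defined last, invoked first
	d.onRegister()
}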
- am.syncConfigs(am.loadAllConfigs())
+func (am *MultitenantAlertmanager) starting(ctx context.Context) (err error) {
+ defer func() {
+ if err == nil || am.subservices == nil {
+ return
+ }
+
+ if stopErr := services.StopManagerAndAwaitStopped(context.Background(), am.subservices); stopErr != nil {
+ level.Error(am.logger).Log("msg", "failed to gracefully stop alertmanager dependencies", "err", stopErr)
+ }
+ }()
+
+ if am.cfg.ShardingEnabled {
+ if am.subservices, err = services.NewManager(am.ringLifecycler, am.ring); err != nil {
+ return errors.Wrap(err, "failed to start alertmanager's subservices")
+ }
+
+ if err = services.StartManagerAndAwaitHealthy(ctx, am.subservices); err != nil {
+ return errors.Wrap(err, "failed to start alertmanager's subservices")
+ }
+
+ am.subservicesWatcher = services.NewFailureWatcher()
+ am.subservicesWatcher.WatchManager(am.subservices)
+
+ // We wait until the instance is in the JOINING state: once it is, we know that tokens are assigned to this instance and we are ready to perform an initial sync of configs.
+ level.Info(am.logger).Log("msg", "waiting until alertmanager is JOINING in the ring")
+ if err = ring.WaitInstanceState(ctx, am.ring, am.ringLifecycler.GetInstanceID(), ring.JOINING); err != nil {
+ return err
+ }
+ level.Info(am.logger).Log("msg", "alertmanager is JOINING in the ring")
+ }
+
+ // At this point, if sharding is enabled, the instance is registered with some tokens
+ // and we can run the initial iteration to sync configs. If sharding is disabled we load _all_ the configs.
+ if err := am.loadAndSyncConfigs(ctx, reasonInitial); err != nil {
+ return err
+ }
+
+ if am.cfg.ShardingEnabled {
+ // With the initial sync now completed, we should have loaded all assigned alertmanager configurations to this instance. We can switch it to ACTIVE and start serving requests.
+ if err := am.ringLifecycler.ChangeState(ctx, ring.ACTIVE); err != nil {
+ return errors.Wrapf(err, "switch instance to %s in the ring", ring.ACTIVE)
+ }
+
+ // Wait until the ring client detected this instance in the ACTIVE state.
+ level.Info(am.logger).Log("msg", "waiting until alertmanager is ACTIVE in the ring")
+ if err := ring.WaitInstanceState(ctx, am.ring, am.ringLifecycler.GetInstanceID(), ring.ACTIVE); err != nil {
+ return err
+ }
+ level.Info(am.logger).Log("msg", "alertmanager is ACTIVE in the ring")
+ }
+
 return nil
 }

-func (am *MultitenantAlertmanager) iteration(ctx context.Context) error {
- err := am.updateConfigs()
+func (am *MultitenantAlertmanager) run(ctx context.Context) error {
+ tick := time.NewTicker(am.cfg.PollInterval)
+ defer tick.Stop()
+
+ var ringTickerChan <-chan time.Time
+ var ringLastState ring.ReplicationSet
+
+ if am.cfg.ShardingEnabled {
+ ringLastState, _ = am.ring.GetAllHealthy(RingOp)
+ ringTicker := time.NewTicker(util.DurationWithJitter(am.cfg.ShardingRing.RingCheckPeriod, 0.2))
+ defer ringTicker.Stop()
+ ringTickerChan = ringTicker.C
+ }
+
+ for {
+ select {
+ case <-ctx.Done():
+ return nil
+ case err := <-am.subservicesWatcher.Chan():
+ return errors.Wrap(err, "alertmanager subservices failed")
+ case <-tick.C:
+ // We don't want to halt execution here but instead just log what happened.
+ if err := am.loadAndSyncConfigs(ctx, reasonPeriodic); err != nil {
+ level.Warn(am.logger).Log("msg", "error while synchronizing alertmanager configs", "err", err)
+ }
+ case <-ringTickerChan:
+ // We ignore the error because in case of error it will return an empty
+ // replication set which we use to compare with the previous state.
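// A small sketch of the jittered ring-check ticker used in run() above.
// durationWithJitter is an assumed stand-in for cortex's util.DurationWithJitter:
// randomizing the period once (here +/-20%) keeps many replicas from polling the
// ring in lockstep.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func durationWithJitter(input time.Duration, variance float64) time.Duration {
	// uniform jitter in [-variance, +variance] of the base period
	jitter := (rand.Float64()*2 - 1) * variance * float64(input)
	return input + time.Duration(jitter)
}

func main() {
	period := durationWithJitter(5*time.Second, 0.2)
	fmt.Println("ring check period:", period)

	ticker := time.NewTicker(period)
	defer ticker.Stop()
	<-ticker.C // first ring-change check would fire here
	fmt.Println("checking ring for changes")
}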
+ currRingState, _ := am.ring.GetAllHealthy(RingOp)
+
+ if ring.HasReplicationSetChanged(ringLastState, currRingState) {
+ ringLastState = currRingState
+ if err := am.loadAndSyncConfigs(ctx, reasonRingChange); err != nil {
+ level.Warn(am.logger).Log("msg", "error while synchronizing alertmanager configs", "err", err)
+ }
+ }
+ }
+ }
+}
+
+func (am *MultitenantAlertmanager) loadAndSyncConfigs(ctx context.Context, syncReason string) error {
+ level.Info(am.logger).Log("msg", "synchronizing alertmanager configs for users")
+ am.syncTotal.WithLabelValues(syncReason).Inc()
+
+ cfgs, err := am.loadAlertmanagerConfigs(ctx)
 if err != nil {
- level.Warn(am.logger).Log("msg", "error updating configs", "err", err)
+ am.syncFailures.WithLabelValues(syncReason).Inc()
+ return err
 }
- // Returning error here would stop "MultitenantAlertmanager" service completely,
- // so we return nil to keep service running.
+
+ am.syncConfigs(cfgs)
 return nil
 }

@@ -280,48 +523,70 @@ func (am *MultitenantAlertmanager) iteration(ctx context.Context) error {
 func (am *MultitenantAlertmanager) stopping(_ error) error {
 am.alertmanagersMtx.Lock()
 for _, am := range am.alertmanagers {
- am.Stop()
+ am.StopAndWait()
 }
 am.alertmanagersMtx.Unlock()

- err := am.peer.Leave(am.cfg.PeerTimeout)
- if err != nil {
- level.Warn(am.logger).Log("msg", "failed to leave the cluster", "err", err)
+ if am.peer != nil { // Tests don't set up any peer.
+ err := am.peer.Leave(am.cfg.Cluster.PeerTimeout)
+ if err != nil {
+ level.Warn(am.logger).Log("msg", "failed to leave the cluster", "err", err)
+ }
 }
- level.Debug(am.logger).Log("msg", "stopping")
- return nil
-}

-// Load the full set of configurations from the alert store, retrying with backoff
-// until we can get them.
-func (am *MultitenantAlertmanager) loadAllConfigs() map[string]alerts.AlertConfigDesc {
- backoff := util.NewBackoff(context.Background(), backoffConfig)
- for {
- cfgs, err := am.poll()
- if err == nil {
- level.Debug(am.logger).Log("msg", "initial configuration load", "num_configs", len(cfgs))
- return cfgs
- }
- level.Warn(am.logger).Log("msg", "error fetching all configurations, backing off", "err", err)
- backoff.Wait()
+ if am.subservices != nil {
+ // subservices manages ring and lifecycler, if sharding was enabled.
+ _ = services.StopManagerAndAwaitStopped(context.Background(), am.subservices)
 }
+ return nil
 }

-func (am *MultitenantAlertmanager) updateConfigs() error {
- cfgs, err := am.poll()
+// loadAlertmanagerConfigs loads (and filters) the Alertmanager configurations from object storage, taking the sharding strategy into consideration.
+func (am *MultitenantAlertmanager) loadAlertmanagerConfigs(ctx context.Context) (map[string]alerts.AlertConfigDesc, error) {
+ configs, err := am.store.ListAlertConfigs(ctx)
 if err != nil {
- return err
+ return nil, err
 }
- am.syncConfigs(cfgs)
- return nil
+
+ // Without any sharding, we return _all_ the configs and there's nothing else for us to do.
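// Sketch of the per-reason sync counters used by loadAndSyncConfigs above, using
// the real prometheus client API. Pre-touching every label value at startup makes
// each series appear as an explicit 0 instead of being absent until the first
// increment, which is what the "Initialize the top-level metrics" loop does.
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	syncTotal := prometheus.NewCounterVec(prometheus.CounterOpts{
		Name: "cortex_alertmanager_sync_configs_total",
		Help: "Total number of times the alertmanager sync operation was triggered.",
	}, []string{"reason"})

	// Initialize all known reasons so dashboards see a zero-valued series.
	for _, reason := range []string{"initial", "periodic", "ring-change"} {
		syncTotal.WithLabelValues(reason)
	}

	syncTotal.WithLabelValues("periodic").Inc()
	fmt.Println("periodic syncs recorded: 1")
}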
+ if !am.cfg.ShardingEnabled { + am.tenantsDiscovered.Set(float64(len(configs))) + am.tenantsOwned.Set(float64(len(configs))) + return configs, nil + } + + ownedConfigs := map[string]alerts.AlertConfigDesc{} + for userID, cfg := range configs { + owned, err := am.isConfigOwned(userID) + if err != nil { + am.ringCheckErrors.Inc() + level.Error(am.logger).Log("msg", "failed to load alertmanager configuration for user", "user", userID, "err", err) + continue + } + + if owned { + level.Debug(am.logger).Log("msg", "alertmanager configuration owned", "user", userID) + ownedConfigs[userID] = cfg + } else { + level.Debug(am.logger).Log("msg", "alertmanager configuration not owned, ignoring", "user", userID) + } + } + + am.tenantsDiscovered.Set(float64(len(configs))) + am.tenantsOwned.Set(float64(len(ownedConfigs))) + return ownedConfigs, nil } -// poll the alert store. Not re-entrant. -func (am *MultitenantAlertmanager) poll() (map[string]alerts.AlertConfigDesc, error) { - cfgs, err := am.store.ListAlertConfigs(context.Background()) +func (am *MultitenantAlertmanager) isConfigOwned(userID string) (bool, error) { + ringHasher := fnv.New32a() + // Hasher never returns err. + _, _ = ringHasher.Write([]byte(userID)) + + alertmanagers, err := am.ring.Get(ringHasher.Sum32(), RingOp, nil, nil, nil) if err != nil { - return nil, err + return false, errors.Wrap(err, "error reading ring to verify config ownership") } - return cfgs, nil + + return alertmanagers.Includes(am.ringLifecycler.GetInstanceAddr()), nil } func (am *MultitenantAlertmanager) syncConfigs(cfgs map[string]alerts.AlertConfigDesc) { @@ -340,17 +605,16 @@ func (am *MultitenantAlertmanager) syncConfigs(cfgs map[string]alerts.AlertConfi am.alertmanagersMtx.Lock() defer am.alertmanagersMtx.Unlock() - for user, userAM := range am.alertmanagers { - if _, exists := cfgs[user]; !exists { - // The user alertmanager is only paused in order to retain the prometheus metrics - // it has reported to its registry. If a new config for this user appears, this structure - // will be reused. - level.Info(am.logger).Log("msg", "deactivating per-tenant alertmanager", "user", user) - userAM.Pause() - delete(am.cfgs, user) - am.multitenantMetrics.lastReloadSuccessful.DeleteLabelValues(user) - am.multitenantMetrics.lastReloadSuccessfulTimestamp.DeleteLabelValues(user) - level.Info(am.logger).Log("msg", "deactivated per-tenant alertmanager", "user", user) + for userID, userAM := range am.alertmanagers { + if _, exists := cfgs[userID]; !exists { + level.Info(am.logger).Log("msg", "deactivating per-tenant alertmanager", "user", userID) + userAM.Stop() + delete(am.alertmanagers, userID) + delete(am.cfgs, userID) + am.multitenantMetrics.lastReloadSuccessful.DeleteLabelValues(userID) + am.multitenantMetrics.lastReloadSuccessfulTimestamp.DeleteLabelValues(userID) + am.alertmanagerMetrics.removeUserRegistry(userID) + level.Info(am.logger).Log("msg", "deactivated per-tenant alertmanager", "user", userID) } } } @@ -358,9 +622,6 @@ func (am *MultitenantAlertmanager) syncConfigs(cfgs map[string]alerts.AlertConfi // setConfig applies the given configuration to the alertmanager for `userID`, // creating an alertmanager if it doesn't already exist. 
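// A self-contained sketch of the ownership check in isConfigOwned above: the
// tenant ID is hashed with 32-bit FNV-1a and the resulting token decides which
// instance owns that tenant's config. The ring lookup is faked here with a fixed
// token boundary; only the hashing matches the real code.
package main

import (
	"fmt"
	"hash/fnv"
)

func tenantToken(userID string) uint32 {
	h := fnv.New32a()
	_, _ = h.Write([]byte(userID)) // Write on a hash.Hash never returns an error
	return h.Sum32()
}

func main() {
	for _, user := range []string{"tenant-a", "tenant-b", "tenant-c"} {
		token := tenantToken(user)
		// In the real code, ring.Get(token, RingOp, ...) returns the replica set
		// and Includes(instanceAddr) answers the ownership question.
		owned := token < 1<<31
		fmt.Printf("user=%s token=%d owned=%v\n", user, token, owned)
	}
}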
func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error { - am.alertmanagersMtx.Lock() - existing, hasExisting := am.alertmanagers[cfg.User] - am.alertmanagersMtx.Unlock() var userAmConfig *amconfig.Config var err error var hasTemplateChanges bool @@ -378,6 +639,10 @@ func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error { level.Debug(am.logger).Log("msg", "setting config", "user", cfg.User) + am.alertmanagersMtx.Lock() + defer am.alertmanagersMtx.Unlock() + existing, hasExisting := am.alertmanagers[cfg.User] + rawCfg := cfg.RawConfig if cfg.RawConfig == "" { if am.fallbackConfig == "" { @@ -430,9 +695,7 @@ func (am *MultitenantAlertmanager) setConfig(cfg alerts.AlertConfigDesc) error { if err != nil { return err } - am.alertmanagersMtx.Lock() am.alertmanagers[cfg.User] = newAM - am.alertmanagersMtx.Unlock() } else if am.cfgs[cfg.User].RawConfig != cfg.RawConfig || hasTemplateChanges { level.Info(am.logger).Log("msg", "updating new per-tenant alertmanager", "user", cfg.User) // If the config changed, apply the new one. @@ -450,9 +713,9 @@ func (am *MultitenantAlertmanager) newAlertmanager(userID string, amConfig *amco newAM, err := New(&Config{ UserID: userID, DataDir: am.cfg.DataDir, - Logger: util.Logger, + Logger: util_log.Logger, Peer: am.peer, - PeerTimeout: am.cfg.PeerTimeout, + PeerTimeout: am.cfg.Cluster.PeerTimeout, Retention: am.cfg.Retention, ExternalURL: am.cfg.ExternalURL.URL, }, reg) @@ -470,6 +733,11 @@ func (am *MultitenantAlertmanager) newAlertmanager(userID string, amConfig *amco // ServeHTTP serves the Alertmanager's web UI and API. func (am *MultitenantAlertmanager) ServeHTTP(w http.ResponseWriter, req *http.Request) { + if am.State() != services.Running { + http.Error(w, "Alertmanager not ready", http.StatusServiceUnavailable) + return + } + userID, err := tenant.TenantID(req.Context()) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) @@ -480,11 +748,6 @@ func (am *MultitenantAlertmanager) ServeHTTP(w http.ResponseWriter, req *http.Re am.alertmanagersMtx.Unlock() if ok { - if !userAM.IsActive() { - level.Debug(am.logger).Log("msg", "the Alertmanager is not active", "user", userID) - http.Error(w, "the Alertmanager is not configured", http.StatusNotFound) - return - } userAM.mux.ServeHTTP(w, req) return diff --git a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/storage.go b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/storage.go index 21c6c4812d4..421314ae615 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/alertmanager/storage.go +++ b/vendor/github.com/cortexproject/cortex/pkg/alertmanager/storage.go @@ -13,6 +13,7 @@ import ( "github.com/cortexproject/cortex/pkg/alertmanager/alerts/objectclient" "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/aws" + "github.com/cortexproject/cortex/pkg/chunk/azure" "github.com/cortexproject/cortex/pkg/chunk/gcp" "github.com/cortexproject/cortex/pkg/configs/client" ) @@ -27,26 +28,32 @@ type AlertStore interface { // AlertStoreConfig configures the alertmanager backend type AlertStoreConfig struct { - Type string `yaml:"type"` - ConfigDB client.Config `yaml:"configdb"` - Local local.StoreConfig `yaml:"local"` + Type string `yaml:"type"` + ConfigDB client.Config `yaml:"configdb"` - GCS gcp.GCSConfig `yaml:"gcs"` - S3 aws.S3Config `yaml:"s3"` + // Object Storage Configs + Azure azure.BlobStorageConfig `yaml:"azure"` + GCS gcp.GCSConfig `yaml:"gcs"` + S3 aws.S3Config `yaml:"s3"` + Local local.StoreConfig 
`yaml:"local"`
 }

 // RegisterFlags registers flags.
 func (cfg *AlertStoreConfig) RegisterFlags(f *flag.FlagSet) {
- cfg.Local.RegisterFlags(f)
 cfg.ConfigDB.RegisterFlagsWithPrefix("alertmanager.", f)
 f.StringVar(&cfg.Type, "alertmanager.storage.type", "configdb", "Type of backend to use to store alertmanager configs. Supported values are: \"azure\", \"configdb\", \"gcs\", \"s3\", \"local\".")
+ cfg.Azure.RegisterFlagsWithPrefix("alertmanager.storage.", f)
 cfg.GCS.RegisterFlagsWithPrefix("alertmanager.storage.", f)
 cfg.S3.RegisterFlagsWithPrefix("alertmanager.storage.", f)
+ cfg.Local.RegisterFlags(f)
 }

 // Validate config and returns error on failure
 func (cfg *AlertStoreConfig) Validate() error {
+ if err := cfg.Azure.Validate(); err != nil {
+ return errors.Wrap(err, "invalid Azure Storage config")
+ }
 if err := cfg.S3.Validate(); err != nil {
 return errors.Wrap(err, "invalid S3 Storage config")
 }
@@ -62,12 +69,14 @@ func NewAlertStore(cfg AlertStoreConfig) (AlertStore, error) {
 return nil, err
 }
 return configdb.NewStore(c), nil
- case "local":
- return local.NewStore(cfg.Local)
+ case "azure":
+ return newObjAlertStore(azure.NewBlobStorage(&cfg.Azure))
 case "gcs":
 return newObjAlertStore(gcp.NewGCSObjectClient(context.Background(), cfg.GCS))
 case "s3":
 return newObjAlertStore(aws.NewS3ObjectClient(cfg.S3))
+ case "local":
+ return local.NewStore(cfg.Local)
 default:
 return nil, fmt.Errorf("unrecognized alertmanager storage backend %v, choose one of: azure, configdb, gcs, local, s3", cfg.Type)
 }
diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/api/api.go
index e174b53c6af..3f7d85dd782 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/api/api.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/api/api.go
@@ -35,6 +35,9 @@ import (
 "github.com/cortexproject/cortex/pkg/util/push"
 )

+// DistributorPushWrapper wraps around a push. It is similar to middleware.Interface.
+type DistributorPushWrapper func(next push.Func) push.Func
+
 type Config struct {
 ResponseCompression bool `yaml:"response_compression_enabled"`

@@ -45,6 +48,10 @@ type Config struct {
 ServerPrefix string `yaml:"-"`
 LegacyHTTPPrefix string `yaml:"-"`
 HTTPAuthMiddleware middleware.Interface `yaml:"-"`
+
+ // This allows downstream projects to wrap the distributor push function
+ // and access the deserialized write requests before/after they are pushed.
+ DistributorPushWrapper DistributorPushWrapper `yaml:"-"`
 }

 // RegisterFlags adds the flags required to configure this to the given FlagSet.
@@ -59,6 +66,15 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
 f.StringVar(&cfg.PrometheusHTTPPrefix, prefix+"http.prometheus-http-prefix", "/prometheus", "HTTP URL path under which the Prometheus api will be served.")
 }

+// wrapDistributorPush either wraps the distributor push function as configured or returns the distributor push directly.
+func (cfg *Config) wrapDistributorPush(d *distributor.Distributor) push.Func {
+ if cfg.DistributorPushWrapper != nil {
+ return cfg.DistributorPushWrapper(d.Push)
+ }
+
+ return d.Push
+}
+
 type API struct {
 AuthMiddleware middleware.Interface

@@ -143,8 +159,10 @@ func (a *API) RegisterRoutesWithPrefix(prefix string, handler http.Handler, auth
 // serve endpoints using the legacy http-prefix if it is not run as a single binary.
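// Sketch of the DistributorPushWrapper hook above, with simplified stand-in types
// (push.Func's real signature lives in the cortex push package): a wrapper
// decorates the distributor's push function middleware-style, seeing each request
// before and after it is pushed. The nil check mirrors wrapDistributorPush.
package main

import (
	"context"
	"fmt"
)

type pushFunc func(ctx context.Context, req string) error

type distributorPushWrapper func(next pushFunc) pushFunc

func wrapDistributorPush(w distributorPushWrapper, push pushFunc) pushFunc {
	if w != nil {
		return w(push)
	}
	return push // no wrapper configured: use the bare push function
}

func main() {
	push := func(ctx context.Context, req string) error {
		fmt.Println("pushing:", req)
		return nil
	}
	logging := distributorPushWrapper(func(next pushFunc) pushFunc {
		return func(ctx context.Context, req string) error {
			fmt.Println("inspecting request before push")
			err := next(ctx, req)
			fmt.Println("push done, err =", err)
			return err
		}
	})
	_ = wrapDistributorPush(logging, push)(context.Background(), "write-request")
}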
func (a *API) RegisterAlertmanager(am *alertmanager.MultitenantAlertmanager, target, apiEnabled bool) {
 a.indexPage.AddLink(SectionAdminEndpoints, "/multitenant_alertmanager/status", "Alertmanager Status")
+ a.indexPage.AddLink(SectionAdminEndpoints, "/multitenant_alertmanager/ring", "Alertmanager Ring Status")
 // Ensure this route is registered before the prefixed AM route
 a.RegisterRoute("/multitenant_alertmanager/status", am.GetStatusHandler(), false, "GET")
+ a.RegisterRoute("/multitenant_alertmanager/ring", http.HandlerFunc(am.RingHandler), false, "GET", "POST")

 // UI components lead to a large number of routes to support, utilize a path prefix instead
 a.RegisterRoutesWithPrefix(a.cfg.AlertmanagerHTTPPrefix, am, true)
@@ -166,17 +184,28 @@ func (a *API) RegisterAlertmanager(am *alertmanager.MultitenantAlertmanager, tar
 }

 // RegisterAPI registers the standard endpoints associated with a running Cortex.
-func (a *API) RegisterAPI(httpPathPrefix string, cfg interface{}) {
- a.indexPage.AddLink(SectionAdminEndpoints, "/config", "Current Config")
+func (a *API) RegisterAPI(httpPathPrefix string, actualCfg interface{}, defaultCfg interface{}) {
+ a.indexPage.AddLink(SectionAdminEndpoints, "/config", "Current Config (including the default values)")
+ a.indexPage.AddLink(SectionAdminEndpoints, "/config?mode=diff", "Current Config (show only values that differ from the defaults)")

- a.RegisterRoute("/config", configHandler(cfg), false, "GET")
+ a.RegisterRoute("/config", configHandler(actualCfg, defaultCfg), false, "GET")
 a.RegisterRoute("/", indexHandler(httpPathPrefix, a.indexPage), false, "GET")
 a.RegisterRoute("/debug/fgprof", fgprof.Handler(), false, "GET")
 }

+// RegisterRuntimeConfig registers the endpoints associated with the runtime configuration
+func (a *API) RegisterRuntimeConfig(runtimeConfigHandler http.HandlerFunc) {
+ a.indexPage.AddLink(SectionAdminEndpoints, "/runtime_config", "Current Runtime Config (incl. Overrides)")
+ a.indexPage.AddLink(SectionAdminEndpoints, "/runtime_config?mode=diff", "Current Runtime Config (show only values that differ from the defaults)")
+
+ a.RegisterRoute("/runtime_config", runtimeConfigHandler, false, "GET")
+}
+
 // RegisterDistributor registers the endpoints associated with the distributor.
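// Sketch of the idea behind /config?mode=diff registered above: marshal both the
// default and the actual config to YAML, unmarshal into generic maps, and keep
// only keys whose values differ. diffConfig here is a flat, simplified stand-in
// for util.DiffConfig (the real one recurses into nested maps).
package main

import (
	"fmt"

	"gopkg.in/yaml.v2"
)

type config struct {
	PollInterval string `yaml:"poll_interval"`
	EnableAPI    bool   `yaml:"enable_api"`
}

// yamlToMap round-trips a config struct through YAML into a generic map.
func yamlToMap(v interface{}) map[interface{}]interface{} {
	raw, _ := yaml.Marshal(v)
	out := map[interface{}]interface{}{}
	_ = yaml.Unmarshal(raw, &out)
	return out
}

func diffConfig(defaults, actual map[interface{}]interface{}) map[interface{}]interface{} {
	diff := map[interface{}]interface{}{}
	for k, v := range actual {
		if dv, ok := defaults[k]; !ok || fmt.Sprint(dv) != fmt.Sprint(v) {
			diff[k] = v // value differs from the default, keep it
		}
	}
	return diff
}

func main() {
	defaults := yamlToMap(config{PollInterval: "15s"})
	actual := yamlToMap(config{PollInterval: "30s"})
	fmt.Println(diffConfig(defaults, actual)) // map[poll_interval:30s]
}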
func (a *API) RegisterDistributor(d *distributor.Distributor, pushConfig distributor.Config) { - a.RegisterRoute("/api/v1/push", push.Handler(pushConfig, a.sourceIPs, d.Push), true, "POST") + client.RegisterPushOnlyIngesterServer(a.server.GRPC, d) + + a.RegisterRoute("/api/v1/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") a.indexPage.AddLink(SectionAdminEndpoints, "/distributor/all_user_stats", "Usage Statistics") a.indexPage.AddLink(SectionAdminEndpoints, "/distributor/ha_tracker", "HA Tracking Status") @@ -185,7 +214,7 @@ func (a *API) RegisterDistributor(d *distributor.Distributor, pushConfig distrib a.RegisterRoute("/distributor/ha_tracker", d.HATracker, false, "GET") // Legacy Routes - a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/push", push.Handler(pushConfig, a.sourceIPs, d.Push), true, "POST") + a.RegisterRoute(a.cfg.LegacyHTTPPrefix+"/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, a.cfg.wrapDistributorPush(d)), true, "POST") a.RegisterRoute("/all_user_stats", http.HandlerFunc(d.AllUserStatsHandler), false, "GET") a.RegisterRoute("/ha-tracker", d.HATracker, false, "GET") } @@ -207,12 +236,12 @@ func (a *API) RegisterIngester(i Ingester, pushConfig distributor.Config) { a.indexPage.AddLink(SectionDangerous, "/ingester/shutdown", "Trigger Ingester Shutdown (Dangerous)") a.RegisterRoute("/ingester/flush", http.HandlerFunc(i.FlushHandler), false, "GET", "POST") a.RegisterRoute("/ingester/shutdown", http.HandlerFunc(i.ShutdownHandler), false, "GET", "POST") - a.RegisterRoute("/ingester/push", push.Handler(pushConfig, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. + a.RegisterRoute("/ingester/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. // Legacy Routes a.RegisterRoute("/flush", http.HandlerFunc(i.FlushHandler), false, "GET", "POST") a.RegisterRoute("/shutdown", http.HandlerFunc(i.ShutdownHandler), false, "GET", "POST") - a.RegisterRoute("/push", push.Handler(pushConfig, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. + a.RegisterRoute("/push", push.Handler(pushConfig.MaxRecvMsgSize, a.sourceIPs, i.Push), true, "POST") // For testing and debugging. } // RegisterChunksPurger registers the endpoints associated with the Purger/DeleteStore. 
They do not exactly @@ -358,3 +387,8 @@ func (a *API) RegisterServiceMapHandler(handler http.Handler) { a.indexPage.AddLink(SectionAdminEndpoints, "/services", "Service Status") a.RegisterRoute("/services", handler, false, "GET") } + +func (a *API) RegisterMemberlistKV(handler http.Handler) { + a.indexPage.AddLink(SectionAdminEndpoints, "/memberlist", "Memberlist Status") + a.RegisterRoute("/memberlist", handler, false, "GET") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go b/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go index 9afffcc8730..80a90ae48e5 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go +++ b/vendor/github.com/cortexproject/cortex/pkg/api/handlers.go @@ -9,7 +9,6 @@ import ( "sync" "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" "github.com/gorilla/mux" "github.com/opentracing-contrib/go-stdlib/nethttp" "github.com/opentracing/opentracing-go" @@ -24,7 +23,6 @@ import ( v1 "github.com/prometheus/prometheus/web/api/v1" "github.com/weaveworks/common/instrument" "github.com/weaveworks/common/middleware" - "gopkg.in/yaml.v2" "github.com/cortexproject/cortex/pkg/chunk/purger" "github.com/cortexproject/cortex/pkg/distributor" @@ -115,19 +113,37 @@ func indexHandler(httpPathPrefix string, content *IndexPageContent) http.Handler } } -func configHandler(cfg interface{}) http.HandlerFunc { +func configHandler(actualCfg interface{}, defaultCfg interface{}) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { - out, err := yaml.Marshal(cfg) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } + var output interface{} + switch r.URL.Query().Get("mode") { + case "diff": + defaultCfgObj, err := util.YAMLMarshalUnmarshal(defaultCfg) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } - w.Header().Set("Content-Type", "text/yaml") - w.WriteHeader(http.StatusOK) - if _, err := w.Write(out); err != nil { - level.Error(util.Logger).Log("msg", "error writing response", "err", err) + actualCfgObj, err := util.YAMLMarshalUnmarshal(actualCfg) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + diff, err := util.DiffConfig(defaultCfgObj, actualCfgObj) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + output = diff + + case "defaults": + output = defaultCfg + default: + output = actualCfg } + + util.WriteYAMLResponse(w, output) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/api/middlewares.go b/vendor/github.com/cortexproject/cortex/pkg/api/middlewares.go index f7ec9d6a819..7e0e88e8030 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/api/middlewares.go +++ b/vendor/github.com/cortexproject/cortex/pkg/api/middlewares.go @@ -14,13 +14,13 @@ import ( func getHTTPCacheGenNumberHeaderSetterMiddleware(cacheGenNumbersLoader *purger.TombstonesLoader) middleware.Interface { return middleware.Func(func(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - userID, err := tenant.TenantID(r.Context()) + tenantIDs, err := tenant.TenantIDs(r.Context()) if err != nil { http.Error(w, err.Error(), http.StatusUnauthorized) return } - cacheGenNumber := cacheGenNumbersLoader.GetResultsCacheGenNumber(userID) + cacheGenNumber := cacheGenNumbersLoader.GetResultsCacheGenNumber(tenantIDs) w.Header().Set(queryrange.ResultsCacheGenNumberHeaderName, cacheGenNumber) next.ServeHTTP(w, r) diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go index 6b470050526..d3c67f71054 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_storage_client.go @@ -31,6 +31,8 @@ import ( chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/math" "github.com/cortexproject/cortex/pkg/util/spanlogger" ) @@ -212,8 +214,8 @@ func (a dynamoDBStorageClient) BatchWrite(ctx context.Context, input chunk.Write continue } else if ok && awsErr.Code() == validationException { // this write will never work, so the only option is to drop the offending items and continue. - level.Warn(util.Logger).Log("msg", "Data lost while flushing to DynamoDB", "err", awsErr) - level.Debug(util.Logger).Log("msg", "Dropped request details", "requests", requests) + level.Warn(log.Logger).Log("msg", "Data lost while flushing to DynamoDB", "err", awsErr) + level.Debug(log.Logger).Log("msg", "Dropped request details", "requests", requests) util.Event().Log("msg", "ValidationException", "requests", requests) // recording the drop counter separately from recordDynamoError(), as the error code alone may not provide enough context // to determine if a request was dropped (or not) @@ -684,11 +686,11 @@ func (b dynamoDBWriteBatch) TakeReqs(from dynamoDBWriteBatch, max int) { outLen, inLen := b.Len(), from.Len() toFill := inLen if max > 0 { - toFill = util.Min(inLen, max-outLen) + toFill = math.Min(inLen, max-outLen) } for toFill > 0 { for tableName, fromReqs := range from { - taken := util.Min(len(fromReqs), toFill) + taken := math.Min(len(fromReqs), toFill) if taken > 0 { b[tableName] = append(b[tableName], fromReqs[:taken]...) 
from[tableName] = fromReqs[taken:] @@ -731,11 +733,11 @@ func (b dynamoDBReadRequest) TakeReqs(from dynamoDBReadRequest, max int) { outLen, inLen := b.Len(), from.Len() toFill := inLen if max > 0 { - toFill = util.Min(inLen, max-outLen) + toFill = math.Min(inLen, max-outLen) } for toFill > 0 { for tableName, fromReqs := range from { - taken := util.Min(len(fromReqs.Keys), toFill) + taken := math.Min(len(fromReqs.Keys), toFill) if taken > 0 { if _, ok := b[tableName]; !ok { b[tableName] = &dynamodb.KeysAndAttributes{ @@ -788,7 +790,7 @@ func awsSessionFromURL(awsURL *url.URL) (client.ConfigProvider, error) { } path := strings.TrimPrefix(awsURL.Path, "/") if len(path) > 0 { - level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) + level.Warn(log.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) } config, err := awscommon.ConfigFromURL(awsURL) if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go index de1798b7eb8..fa7b5f5fad7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/dynamodb_table_client.go @@ -16,6 +16,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" ) // Pluggable auto-scaler implementation @@ -83,7 +84,7 @@ func (d callManager) backoffAndRetry(ctx context.Context, fn func(context.Contex for backoff.Ongoing() { if err := fn(ctx); err != nil { if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "ThrottlingException" { - level.Warn(util.WithContext(ctx, util.Logger)).Log("msg", "got error, backing off and retrying", "err", err, "retry", backoff.NumRetries()) + level.Warn(log.WithContext(ctx, log.Logger)).Log("msg", "got error, backing off and retrying", "err", err, "retry", backoff.NumRetries()) backoff.Wait() continue } else { @@ -291,7 +292,7 @@ func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected ch return err } } - level.Debug(util.Logger).Log("msg", "Updating Table", + level.Debug(log.Logger).Log("msg", "Updating Table", "expectedWrite", expected.ProvisionedWrite, "currentWrite", current.ProvisionedWrite, "expectedRead", expected.ProvisionedRead, @@ -301,7 +302,7 @@ func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected ch if (current.ProvisionedRead != expected.ProvisionedRead || current.ProvisionedWrite != expected.ProvisionedWrite) && !expected.UseOnDemandIOMode { - level.Info(util.Logger).Log("msg", "updating provisioned throughput on table", "table", expected.Name, "old_read", current.ProvisionedRead, "old_write", current.ProvisionedWrite, "new_read", expected.ProvisionedRead, "new_write", expected.ProvisionedWrite) + level.Info(log.Logger).Log("msg", "updating provisioned throughput on table", "table", expected.Name, "old_read", current.ProvisionedRead, "old_write", current.ProvisionedWrite, "new_read", expected.ProvisionedRead, "new_write", expected.ProvisionedWrite) if err := d.backoffAndRetry(ctx, func(ctx context.Context) error { return instrument.CollectedRequest(ctx, "DynamoDB.UpdateTable", d.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { var dynamoBillingMode string @@ -315,7 +316,7 @@ func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected ch // an error if we set a table to the 
billing mode it is currently on. if current.UseOnDemandIOMode != expected.UseOnDemandIOMode { dynamoBillingMode = dynamodb.BillingModeProvisioned - level.Info(util.Logger).Log("msg", "updating billing mode on table", "table", expected.Name, "old_mode", current.UseOnDemandIOMode, "new_mode", expected.UseOnDemandIOMode) + level.Info(log.Logger).Log("msg", "updating billing mode on table", "table", expected.Name, "old_mode", current.UseOnDemandIOMode, "new_mode", expected.UseOnDemandIOMode) updateTableInput.BillingMode = aws.String(dynamoBillingMode) } @@ -325,7 +326,7 @@ func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected ch }); err != nil { recordDynamoError(expected.Name, err, "DynamoDB.UpdateTable", d.metrics) if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "LimitExceededException" { - level.Warn(util.Logger).Log("msg", "update limit exceeded", "err", err) + level.Warn(log.Logger).Log("msg", "update limit exceeded", "err", err) } else { return err } @@ -335,7 +336,7 @@ func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected ch // settings used in provisioned mode. Unfortunately the boilerplate wrappers for retry and tracking needed to be copied. if err := d.backoffAndRetry(ctx, func(ctx context.Context) error { return instrument.CollectedRequest(ctx, "DynamoDB.UpdateTable", d.metrics.dynamoRequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - level.Info(util.Logger).Log("msg", "updating billing mode on table", "table", expected.Name, "old_mode", current.UseOnDemandIOMode, "new_mode", expected.UseOnDemandIOMode) + level.Info(log.Logger).Log("msg", "updating billing mode on table", "table", expected.Name, "old_mode", current.UseOnDemandIOMode, "new_mode", expected.UseOnDemandIOMode) updateTableInput := &dynamodb.UpdateTableInput{TableName: aws.String(expected.Name), BillingMode: aws.String(dynamodb.BillingModePayPerRequest)} _, err := d.DynamoDB.UpdateTableWithContext(ctx, updateTableInput) return err @@ -343,7 +344,7 @@ func (d dynamoTableClient) UpdateTable(ctx context.Context, current, expected ch }); err != nil { recordDynamoError(expected.Name, err, "DynamoDB.UpdateTable", d.metrics) if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "LimitExceededException" { - level.Warn(util.Logger).Log("msg", "update limit exceeded", "err", err) + level.Warn(log.Logger).Log("msg", "update limit exceeded", "err", err) } else { return err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go index fea098c8233..b8aae77f523 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/metrics_autoscaling.go @@ -14,7 +14,7 @@ import ( "github.com/weaveworks/common/mtime" "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const ( @@ -112,7 +112,7 @@ func (m *metricsData) UpdateTable(ctx context.Context, current chunk.TableDesc, throttleRate := m.throttleRates[expected.Name] usageRate := m.usageRates[expected.Name] - level.Info(util.Logger).Log("msg", "checking write metrics", "table", current.Name, "queueLengths", fmt.Sprint(m.queueLengths), "throttleRate", throttleRate, "usageRate", usageRate) + level.Info(util_log.Logger).Log("msg", "checking write metrics", "table", current.Name, "queueLengths", 
fmt.Sprint(m.queueLengths), "throttleRate", throttleRate, "usageRate", usageRate) switch { case throttleRate < throttleFractionScaledown*float64(current.ProvisionedWrite) && @@ -170,7 +170,7 @@ func (m *metricsData) UpdateTable(ctx context.Context, current chunk.TableDesc, readUsageRate := m.usageReadRates[expected.Name] readErrorRate := m.readErrorRates[expected.Name] - level.Info(util.Logger).Log("msg", "checking read metrics", "table", current.Name, "errorRate", readErrorRate, "readUsageRate", readUsageRate) + level.Info(util_log.Logger).Log("msg", "checking read metrics", "table", current.Name, "errorRate", readErrorRate, "readUsageRate", readUsageRate) // Read Scaling switch { // the table is at low/minimum capacity and it is being used -> scale up @@ -235,14 +235,14 @@ func scaleDown(tableName string, currentValue, minValue int64, newValue int64, l earliest := lastUpdated[tableName].Add(time.Duration(coolDown) * time.Second) if earliest.After(mtime.Now()) { - level.Info(util.Logger).Log("msg", "deferring "+msg, "table", tableName, "till", earliest, "op", operation) + level.Info(util_log.Logger).Log("msg", "deferring "+msg, "table", tableName, "till", earliest, "op", operation) return currentValue } // Reject a change that is less than 20% - AWS rate-limits scale-downs so save // our chances until it makes a bigger difference if newValue > currentValue*4/5 { - level.Info(util.Logger).Log("msg", "rejected de minimis "+msg, "table", tableName, "current", currentValue, "proposed", newValue, "op", operation) + level.Info(util_log.Logger).Log("msg", "rejected de minimis "+msg, "table", tableName, "current", currentValue, "proposed", newValue, "op", operation) return currentValue } @@ -254,12 +254,12 @@ func scaleDown(tableName string, currentValue, minValue int64, newValue int64, l totalUsage += u } if totalUsage < minUsageForScaledown { - level.Info(util.Logger).Log("msg", "rejected low usage "+msg, "table", tableName, "totalUsage", totalUsage, "op", operation) + level.Info(util_log.Logger).Log("msg", "rejected low usage "+msg, "table", tableName, "totalUsage", totalUsage, "op", operation) return currentValue } } - level.Info(util.Logger).Log("msg", msg, "table", tableName, operation, newValue) + level.Info(util_log.Logger).Log("msg", msg, "table", tableName, operation, newValue) lastUpdated[tableName] = mtime.Now() return newValue } @@ -270,12 +270,12 @@ func scaleUp(tableName string, currentValue, maxValue int64, newValue int64, las } earliest := lastUpdated[tableName].Add(time.Duration(coolDown) * time.Second) if !earliest.After(mtime.Now()) && newValue > currentValue { - level.Info(util.Logger).Log("msg", msg, "table", tableName, operation, newValue) + level.Info(util_log.Logger).Log("msg", msg, "table", tableName, operation, newValue) lastUpdated[tableName] = mtime.Now() return newValue } - level.Info(util.Logger).Log("msg", "deferring "+msg, "table", tableName, "till", earliest) + level.Info(util_log.Logger).Log("msg", "deferring "+msg, "table", tableName, "till", earliest) return currentValue } @@ -362,7 +362,7 @@ func promQuery(ctx context.Context, promAPI promV1.API, query string, duration, return nil, err } if wrngs != nil { - level.Warn(util.Logger).Log( + level.Warn(util_log.Logger).Log( "query", query, "start", queryRange.Start, "end", queryRange.End, diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go index 864e410dd5c..a6e09b36149 100644 --- 
a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/mock.go @@ -18,7 +18,7 @@ import ( "github.com/aws/aws-sdk-go/service/s3/s3iface" "github.com/go-kit/kit/log/level" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const arnPrefix = "arn:" @@ -234,7 +234,7 @@ func (m *mockDynamoDBClient) QueryPagesWithContext(ctx aws.Context, input *dynam continue } } else { - level.Warn(util.Logger).Log("msg", "unsupported FilterExpression", "expression", *input.FilterExpression) + level.Warn(util_log.Logger).Log("msg", "unsupported FilterExpression", "expression", *input.FilterExpression) } } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go index db4bafcf384..2c7ec0fa71f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/s3_storage_client.go @@ -72,6 +72,7 @@ type S3Config struct { SSEEncryption bool `yaml:"sse_encryption"` HTTPConfig HTTPConfig `yaml:"http_config"` SignatureVersion string `yaml:"signature_version"` + SSEConfig SSEConfig `yaml:"sse"` Inject InjectRequestMiddleware `yaml:"-"` } @@ -100,7 +101,11 @@ func (cfg *S3Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.StringVar(&cfg.AccessKeyID, prefix+"s3.access-key-id", "", "AWS Access Key ID") f.StringVar(&cfg.SecretAccessKey, prefix+"s3.secret-access-key", "", "AWS Secret Access Key") f.BoolVar(&cfg.Insecure, prefix+"s3.insecure", false, "Disable https on s3 connection.") - f.BoolVar(&cfg.SSEEncryption, prefix+"s3.sse-encryption", false, "Enable AES256 AWS Server Side Encryption") + + // TODO Remove in Cortex 1.9.0 + f.BoolVar(&cfg.SSEEncryption, prefix+"s3.sse-encryption", false, "Enable AWS Server Side Encryption [Deprecated: Use .sse instead. if s3.sse-encryption is enabled, it assumes .sse.type SSE-S3]") + + cfg.SSEConfig.RegisterFlagsWithPrefix(prefix+"s3.sse.", f) f.DurationVar(&cfg.HTTPConfig.IdleConnTimeout, prefix+"s3.http.idle-conn-timeout", 90*time.Second, "The maximum amount of time an idle connection will be held open.") f.DurationVar(&cfg.HTTPConfig.ResponseHeaderTimeout, prefix+"s3.http.response-header-timeout", 0, "If non-zero, specifies the amount of time to wait for a server's response headers after fully writing the request.") @@ -117,9 +122,9 @@ func (cfg *S3Config) Validate() error { } type S3ObjectClient struct { - bucketNames []string - S3 s3iface.S3API - sseEncryption *string + bucketNames []string + S3 s3iface.S3API + sseConfig *SSEParsedConfig } // NewS3ObjectClient makes a new S3-backed ObjectClient. 
@@ -140,19 +145,34 @@ func NewS3ObjectClient(cfg S3Config) (*S3ObjectClient, error) { s3Client.Handlers.Sign.Swap(v4.SignRequestHandler.Name, v2SignRequestHandler(cfg)) } - var sseEncryption *string - if cfg.SSEEncryption { - sseEncryption = aws.String("AES256") + sseCfg, err := buildSSEParsedConfig(cfg) + if err != nil { + return nil, errors.Wrap(err, "failed to build SSE config") } client := S3ObjectClient{ - S3: s3Client, - bucketNames: bucketNames, - sseEncryption: sseEncryption, + S3: s3Client, + bucketNames: bucketNames, + sseConfig: sseCfg, } return &client, nil } +func buildSSEParsedConfig(cfg S3Config) (*SSEParsedConfig, error) { + if cfg.SSEConfig.Type != "" { + return NewSSEParsedConfig(cfg.SSEConfig) + } + + // deprecated, but if used it assumes SSE-S3 type + if cfg.SSEEncryption { + return NewSSEParsedConfig(SSEConfig{ + Type: SSES3, + }) + } + + return nil, nil +} + func v2SignRequestHandler(cfg S3Config) request.NamedHandler { return request.NamedHandler{ Name: "v2.SignRequestHandler", @@ -324,15 +344,22 @@ func (a *S3ObjectClient) GetObject(ctx context.Context, objectKey string) (io.Re return resp.Body, nil } -// Put object into the store +// PutObject into the store func (a *S3ObjectClient) PutObject(ctx context.Context, objectKey string, object io.ReadSeeker) error { return instrument.CollectedRequest(ctx, "S3.PutObject", s3RequestDuration, instrument.ErrorCode, func(ctx context.Context) error { - _, err := a.S3.PutObjectWithContext(ctx, &s3.PutObjectInput{ - Body: object, - Bucket: aws.String(a.bucketFromKey(objectKey)), - Key: aws.String(objectKey), - ServerSideEncryption: a.sseEncryption, - }) + putObjectInput := &s3.PutObjectInput{ + Body: object, + Bucket: aws.String(a.bucketFromKey(objectKey)), + Key: aws.String(objectKey), + } + + if a.sseConfig != nil { + putObjectInput.ServerSideEncryption = aws.String(a.sseConfig.ServerSideEncryption) + putObjectInput.SSEKMSKeyId = a.sseConfig.KMSKeyID + putObjectInput.SSEKMSEncryptionContext = a.sseConfig.KMSEncryptionContext + } + + _, err := a.S3.PutObjectWithContext(ctx, putObjectInput) return err }) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/sse_config.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/sse_config.go new file mode 100644 index 00000000000..b62000fdc4d --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/aws/sse_config.go @@ -0,0 +1,86 @@ +package aws + +import ( + "encoding/base64" + "encoding/json" + "flag" + + "github.com/pkg/errors" +) + +const ( + // SSEKMS config type constant to configure S3 server side encryption using KMS + // https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html + SSEKMS = "SSE-KMS" + sseKMSType = "aws:kms" + // SSES3 config type constant to configure S3 server side encryption with AES-256 + // https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html + SSES3 = "SSE-S3" + sseS3Type = "AES256" +) + +// SSEParsedConfig configures server side encryption (SSE) +// struct used internally to configure AWS S3 +type SSEParsedConfig struct { + ServerSideEncryption string + KMSKeyID *string + KMSEncryptionContext *string +} + +// SSEConfig configures S3 server side encryption +// struct that is going to receive user input (through config file or CLI) +type SSEConfig struct { + Type string `yaml:"type"` + KMSKeyID string `yaml:"kms_key_id"` + KMSEncryptionContext string `yaml:"kms_encryption_context"` +} + +// RegisterFlagsWithPrefix adds the flags required to config this to the given FlagSet +func (cfg 
*SSEConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&cfg.Type, prefix+"type", "", "Enable AWS Server Side Encryption. Only SSE-S3 and SSE-KMS are supported") + f.StringVar(&cfg.KMSKeyID, prefix+"kms-key-id", "", "KMS Key ID used to encrypt objects in S3") + f.StringVar(&cfg.KMSEncryptionContext, prefix+"kms-encryption-context", "", "KMS Encryption Context used for object encryption. It expects a JSON as a string.") +} + +// NewSSEParsedConfig creates a struct to configure server side encryption (SSE) +func NewSSEParsedConfig(cfg SSEConfig) (*SSEParsedConfig, error) { + switch cfg.Type { + case SSES3: + return &SSEParsedConfig{ + ServerSideEncryption: sseS3Type, + }, nil + case SSEKMS: + if cfg.KMSKeyID == "" { + return nil, errors.New("KMS key id must be passed when SSE-KMS encryption is selected") + } + + parsedKMSEncryptionContext, err := parseKMSEncryptionContext(cfg.KMSEncryptionContext) + if err != nil { + return nil, errors.Wrap(err, "failed to parse KMS encryption context") + } + + return &SSEParsedConfig{ + ServerSideEncryption: sseKMSType, + KMSKeyID: &cfg.KMSKeyID, + KMSEncryptionContext: parsedKMSEncryptionContext, + }, nil + default: + return nil, errors.New("SSE type is empty or invalid") + } +} + +func parseKMSEncryptionContext(kmsEncryptionContext string) (*string, error) { + if kmsEncryptionContext == "" { + return nil, nil + } + + // validates if kmsEncryptionContext is a valid JSON + jsonKMSEncryptionContext, err := json.Marshal(json.RawMessage(kmsEncryptionContext)) + if err != nil { + return nil, errors.Wrap(err, "failed to marshal KMS encryption context") + } + + parsedKMSEncryptionContext := base64.StdEncoding.EncodeToString([]byte(jsonKMSEncryptionContext)) + + return &parsedKMSEncryptionContext, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go index 7a68262233c..c6a1dee6242 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/azure/blob_storage_client.go @@ -16,6 +16,7 @@ import ( chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/log" ) const ( @@ -93,7 +94,7 @@ type BlobStorage struct { // NewBlobStorage creates a new instance of the BlobStorage struct. func NewBlobStorage(cfg *BlobStorageConfig) (*BlobStorage, error) { - util.WarnExperimentalUse("Azure Blob Storage") + log.WarnExperimentalUse("Azure Blob Storage") blobStorage := &BlobStorage{ cfg: cfg, } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go index 81432d1a1e5..1215a6cff11 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/fifo_cache.go @@ -15,8 +15,8 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const ( @@ -93,7 +93,7 @@ type cacheEntry struct { // NewFifoCache returns a new initialised FifoCache of size. 
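For context on the new sse_config.go above: the KMS encryption context is accepted as a raw JSON string, validated by round-tripping it through json.RawMessage, and only then base64-encoded for the S3 API. A minimal standalone sketch of that encoding (the example context object is hypothetical; only encoding/json and encoding/base64 from the standard library are involved):

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// encodeKMSEncryptionContext mirrors parseKMSEncryptionContext above:
// json.Marshal on a json.RawMessage validates (and compacts) the input,
// and the result is base64-encoded as S3 expects for SSEKMSEncryptionContext.
func encodeKMSEncryptionContext(raw string) (string, error) {
	validated, err := json.Marshal(json.RawMessage(raw))
	if err != nil {
		return "", err // not valid JSON
	}
	return base64.StdEncoding.EncodeToString(validated), nil
}

func main() {
	enc, err := encodeKMSEncryptionContext(`{"team":"ingest","env":"prod"}`)
	if err != nil {
		panic(err)
	}
	fmt.Println(enc) // the value that ends up in PutObjectInput.SSEKMSEncryptionContext
}

Invalid JSON fails once at config-parse time in NewSSEParsedConfig rather than on the first PutObject, which is why the parsing is done up front.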
func NewFifoCache(name string, cfg FifoCacheConfig, reg prometheus.Registerer, logger log.Logger) *FifoCache { - util.WarnExperimentalUse("In-memory (FIFO) cache") + util_log.WarnExperimentalUse("In-memory (FIFO) cache") if cfg.DeprecatedSize > 0 { flagext.DeprecatedFlagsUsed.Inc() diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go index 8b89d3e1fe5..5f2fa0dc4f6 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached.go @@ -16,7 +16,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" instr "github.com/weaveworks/common/instrument" - "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/math" "github.com/cortexproject/cortex/pkg/util/spanlogger" ) @@ -186,7 +186,7 @@ func (c *Memcached) fetchKeysBatched(ctx context.Context, keys []string) (found go func() { for i, j := 0, 0; i < len(keys); i += batchSize { - batchKeys := keys[i:util.Min(i+batchSize, len(keys))] + batchKeys := keys[i:math.Min(i+batchSize, len(keys))] c.inputCh <- &work{ keys: batchKeys, ctx: ctx, diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go index ddb08972049..b0826d9bfd8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/memcached_client.go @@ -19,7 +19,7 @@ import ( "github.com/sony/gobreaker" "github.com/thanos-io/thanos/pkg/discovery/dns" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // MemcachedClient interface exists for mocking memcacheClient. 
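The memcached change above only repoints util.Min at the relocated pkg/util/math helper, but the batching idiom it supports is worth seeing in isolation. A self-contained sketch, with a local min standing in for Cortex's math.Min:

package main

import "fmt"

// min stands in for github.com/cortexproject/cortex/pkg/util/math.Min.
func min(a, b int) int {
	if a < b {
		return a
	}
	return b
}

func main() {
	keys := []string{"k1", "k2", "k3", "k4", "k5"}
	const batchSize = 2

	// Walk the slice in fixed-size batches; min clamps the final,
	// possibly short, batch so the slice bound is never exceeded.
	for i := 0; i < len(keys); i += batchSize {
		batch := keys[i:min(i+batchSize, len(keys))]
		fmt.Println(batch) // [k1 k2], [k3 k4], [k5]
	}
}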
@@ -132,7 +132,7 @@ func NewMemcachedClient(cfg MemcachedClientConfig, name string, r prometheus.Reg } if len(cfg.Addresses) > 0 { - util.WarnExperimentalUse("DNS-based memcached service discovery") + util_log.WarnExperimentalUse("DNS-based memcached service discovery") newClient.addresses = strings.Split(cfg.Addresses, ",") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go index 5887bd84eed..efe1e786e77 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cache/redis_cache.go @@ -6,7 +6,7 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // RedisCache type caches chunks in redis @@ -18,7 +18,7 @@ type RedisCache struct { // NewRedisCache creates a new RedisCache func NewRedisCache(name string, redisClient *RedisClient, logger log.Logger) *RedisCache { - util.WarnExperimentalUse("Redis cache") + util_log.WarnExperimentalUse("Redis cache") cache := &RedisCache{ name: name, redis: redisClient, diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go index 1e638f6091f..2c802b764fa 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/cassandra/storage_client.go @@ -19,8 +19,8 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/util" - pkgutil "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // Config for a StorageClient @@ -109,7 +109,7 @@ func (cfg *Config) session(name string, reg prometheus.Registerer) (*gocql.Sessi cluster.ConnectTimeout = cfg.ConnectTimeout cluster.ReconnectInterval = cfg.ReconnectInterval cluster.NumConns = cfg.NumConnections - cluster.Logger = log.With(pkgutil.Logger, "module", "gocql", "client", name) + cluster.Logger = log.With(util_log.Logger, "module", "gocql", "client", name) cluster.Registerer = prometheus.WrapRegistererWith( prometheus.Labels{"client": name}, reg) if cfg.Retries > 0 { @@ -536,7 +536,7 @@ type noopConvictionPolicy struct{} // Convicted means connections are removed - we don't want that. // Implements gocql.ConvictionPolicy.
func (noopConvictionPolicy) AddFailure(err error, host *gocql.HostInfo) bool { - level.Error(pkgutil.Logger).Log("msg", "Cassandra host failure", "err", err, "host", host.String()) + level.Error(util_log.Logger).Log("msg", "Cassandra host failure", "err", err, "host", host.String()) return false } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go index 0c212abb2fe..0fc096d3c80 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store.go @@ -21,6 +21,7 @@ import ( "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/extract" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -539,7 +540,7 @@ func (c *baseStore) lookupEntriesByQueries(ctx context.Context, queries []IndexQ return true }) if err != nil { - level.Error(util.WithContext(ctx, util.Logger)).Log("msg", "error querying storage", "err", err) + level.Error(util_log.WithContext(ctx, util_log.Logger)).Log("msg", "error querying storage", "err", err) } return entries, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go index 061a9b1c638..aeb90488540 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/chunk_store_utils.go @@ -10,7 +10,7 @@ import ( "github.com/prometheus/prometheus/promql" "github.com/cortexproject/cortex/pkg/chunk/cache" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/spanlogger" ) @@ -211,7 +211,7 @@ func (c *Fetcher) processCacheResponse(ctx context.Context, chunks []Chunk, keys missing = append(missing, chunks[i]) i++ } else if chunkKey > keys[j] { - level.Warn(util.Logger).Log("msg", "got chunk from cache we didn't ask for") + level.Warn(util_log.Logger).Log("msg", "got chunk from cache we didn't ask for") j++ } else { requests = append(requests, decodeRequest{ diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go index a3c5a22b20e..d3c79013bbf 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/composite_store.go @@ -19,7 +19,7 @@ type StoreLimits interface { } type CacheGenNumLoader interface { - GetStoreCacheGenNumber(userID string) string + GetStoreCacheGenNumber(tenantIDs []string) string } // Store for chunks. 
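The CacheGenNumLoader change above widens the interface from a single user ID to a slice of tenant IDs, so a cache entry shared by a multi-tenant query can be invalidated when any one tenant's generation number moves. A toy loader, assuming (as the real implementation further down in tombstones.go does) that generation numbers are numeric strings and the highest one wins:

package main

import (
	"fmt"
	"strconv"
)

// mapLoader is a hypothetical CacheGenNumLoader backed by a map.
type mapLoader struct {
	gens map[string]string // tenant ID -> store cache gen number
}

// GetStoreCacheGenNumber returns the highest numeric generation
// number across the given tenants; unparsable entries are skipped.
func (l mapLoader) GetStoreCacheGenNumber(tenantIDs []string) string {
	maxGen, out := -1, ""
	for _, id := range tenantIDs {
		g := l.gens[id]
		if n, err := strconv.Atoi(g); err == nil && n > maxGen {
			maxGen, out = n, g
		}
	}
	return out
}

func main() {
	l := mapLoader{gens: map[string]string{"tenant-a": "3", "tenant-b": "7"}}
	fmt.Println(l.GetStoreCacheGenNumber([]string{"tenant-a", "tenant-b"})) // 7
}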
@@ -217,7 +217,7 @@ func (c compositeStore) forStores(ctx context.Context, userID string, from, thro return nil } - ctx = c.injectCacheGen(ctx, userID) + ctx = c.injectCacheGen(ctx, []string{userID}) // first, find the schema with the highest start _before or at_ from i := sort.Search(len(c.stores), func(i int) bool { @@ -262,10 +262,10 @@ func (c compositeStore) forStores(ctx context.Context, userID string, from, thro return nil } -func (c compositeStore) injectCacheGen(ctx context.Context, userID string) context.Context { +func (c compositeStore) injectCacheGen(ctx context.Context, tenantIDs []string) context.Context { if c.cacheGenNumLoader == nil { return ctx } - return cache.InjectCacheGenNumber(ctx, c.cacheGenNumLoader.GetStoreCacheGenNumber(userID)) + return cache.InjectCacheGenNumber(ctx, c.cacheGenNumLoader.GetStoreCacheGenNumber(tenantIDs)) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go index 042507eef45..8992f358d5e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_index_client.go @@ -17,8 +17,8 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/grpcclient" + "github.com/cortexproject/cortex/pkg/util/math" "github.com/cortexproject/cortex/pkg/util/spanlogger" ) @@ -51,6 +51,8 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.BoolVar(&cfg.TableCacheEnabled, "bigtable.table-cache.enabled", true, "If enabled, once a tables info is fetched, it is cached.") f.DurationVar(&cfg.TableCacheExpiration, "bigtable.table-cache.expiration", 30*time.Minute, "Duration to cache tables before checking again.") + // This overrides our default from TLS disabled to TLS enabled + cfg.GRPCClientConfig.TLSEnabled = true cfg.GRPCClientConfig.RegisterFlagsWithPrefix("bigtable", f) } @@ -73,8 +75,11 @@ type storageClientV1 struct { // NewStorageClientV1 returns a new v1 StorageClient. func NewStorageClientV1(ctx context.Context, cfg Config, schemaCfg chunk.SchemaConfig) (chunk.IndexClient, error) { - opts := toOptions(cfg.GRPCClientConfig.DialOption(bigtableInstrumentation())) - client, err := bigtable.NewClient(ctx, cfg.Project, cfg.Instance, opts...) + dialOpts, err := cfg.GRPCClientConfig.DialOption(bigtableInstrumentation()) + if err != nil { + return nil, err + } + client, err := bigtable.NewClient(ctx, cfg.Project, cfg.Instance, toOptions(dialOpts)...) if err != nil { return nil, err } @@ -97,8 +102,11 @@ func newStorageClientV1(cfg Config, schemaCfg chunk.SchemaConfig, client *bigtab // NewStorageClientColumnKey returns a new v2 StorageClient. func NewStorageClientColumnKey(ctx context.Context, cfg Config, schemaCfg chunk.SchemaConfig) (chunk.IndexClient, error) { - opts := toOptions(cfg.GRPCClientConfig.DialOption(bigtableInstrumentation())) - client, err := bigtable.NewClient(ctx, cfg.Project, cfg.Instance, opts...) + dialOpts, err := cfg.GRPCClientConfig.DialOption(bigtableInstrumentation()) + if err != nil { + return nil, err + } + client, err := bigtable.NewClient(ctx, cfg.Project, cfg.Instance, toOptions(dialOpts)...) 
if err != nil { return nil, err } @@ -244,7 +252,7 @@ func (s *storageClientColumnKey) QueryPages(ctx context.Context, queries []chunk table := s.client.Open(tq.name) for i := 0; i < len(tq.rows); i += maxRowReads { - page := tq.rows[i:util.Min(i+maxRowReads, len(tq.rows))] + page := tq.rows[i:math.Min(i+maxRowReads, len(tq.rows))] go func(page bigtable.RowList, tq tableQuery) { var processingErr error // rows are returned in key order, not order in row list diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go index a0cc62013b7..2a18195a4b9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/bigtable_object_client.go @@ -10,7 +10,7 @@ import ( "github.com/pkg/errors" "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/math" ) type bigtableObjectClient struct { @@ -22,8 +22,11 @@ type bigtableObjectClient struct { // NewBigtableObjectClient makes a new chunk.Client that stores chunks in // Bigtable. func NewBigtableObjectClient(ctx context.Context, cfg Config, schemaCfg chunk.SchemaConfig) (chunk.Client, error) { - opts := toOptions(cfg.GRPCClientConfig.DialOption(bigtableInstrumentation())) - client, err := bigtable.NewClient(ctx, cfg.Project, cfg.Instance, opts...) + dialOpts, err := cfg.GRPCClientConfig.DialOption(bigtableInstrumentation()) + if err != nil { + return nil, err + } + client, err := bigtable.NewClient(ctx, cfg.Project, cfg.Instance, toOptions(dialOpts)...) if err != nil { return nil, err } @@ -109,7 +112,7 @@ func (s *bigtableObjectClient) GetChunks(ctx context.Context, input []chunk.Chun ) for i := 0; i < len(keys); i += maxRowReads { - page := keys[i:util.Min(i+maxRowReads, len(keys))] + page := keys[i:math.Min(i+maxRowReads, len(keys))] go func(page bigtable.RowList) { decodeContext := chunk.NewDecodeContext() diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/table_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/table_client.go index 268e087c579..26d032b4831 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/table_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/gcp/table_client.go @@ -24,8 +24,11 @@ type tableClient struct { // NewTableClient returns a new TableClient. func NewTableClient(ctx context.Context, cfg Config) (chunk.TableClient, error) { - opts := toOptions(cfg.GRPCClientConfig.DialOption(bigtableInstrumentation())) - client, err := bigtable.NewAdminClient(ctx, cfg.Project, cfg.Instance, opts...) + dialOpts, err := cfg.GRPCClientConfig.DialOption(bigtableInstrumentation()) + if err != nil { + return nil, err + } + client, err := bigtable.NewAdminClient(ctx, cfg.Project, cfg.Instance, toOptions(dialOpts)...) 
if err != nil { return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go index f982c59d745..e9ae3282e1f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/inmemory_storage_client.go @@ -13,7 +13,7 @@ import ( "github.com/go-kit/kit/log/level" - "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" ) type MockStorageMode int @@ -174,7 +174,7 @@ func (m *MockStorage) BatchWrite(ctx context.Context, batch WriteBatch) error { } seenWrites[key] = true - level.Debug(util.WithContext(ctx, util.Logger)).Log("msg", "write", "hash", req.hashValue, "range", req.rangeValue) + level.Debug(log.WithContext(ctx, log.Logger)).Log("msg", "write", "hash", req.hashValue, "range", req.rangeValue) items := table.items[req.hashValue] @@ -247,7 +247,7 @@ func (m *MockStorage) QueryPages(ctx context.Context, queries []IndexQuery, call } func (m *MockStorage) query(ctx context.Context, query IndexQuery, callback func(ReadBatch) (shouldContinue bool)) error { - logger := util.WithContext(ctx, util.Logger) + logger := log.WithContext(ctx, log.Logger) level.Debug(logger).Log("msg", "QueryPages", "query", query.HashValue) table, ok := m.tables[query.TableName] diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go index 6d76abf9e3d..bbb814badad 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/boltdb_index_client.go @@ -17,7 +17,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" chunk_util "github.com/cortexproject/cortex/pkg/chunk/util" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) var ( @@ -94,7 +94,7 @@ func (b *BoltIndexClient) reload() { for name := range b.dbs { if _, err := os.Stat(path.Join(b.cfg.Directory, name)); err != nil && os.IsNotExist(err) { removedDBs = append(removedDBs, name) - level.Debug(util.Logger).Log("msg", "boltdb file got removed", "filename", name) + level.Debug(util_log.Logger).Log("msg", "boltdb file got removed", "filename", name) continue } } @@ -106,7 +106,7 @@ func (b *BoltIndexClient) reload() { for _, name := range removedDBs { if err := b.dbs[name].Close(); err != nil { - level.Error(util.Logger).Log("msg", "failed to close removed boltdb", "filename", name, "err", err) + level.Error(util_log.Logger).Log("msg", "failed to close removed boltdb", "filename", name, "err", err) continue } delete(b.dbs, name) diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go index f6477694348..ff9b5e44b2c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/local/fs_object_client.go @@ -14,7 +14,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/chunk/util" - pkgUtil "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // FSConfig is the config for a FSObjectClient. 
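The BoltIndexClient.reload hunk a little above follows a common pattern for long-lived file-handle caches: stat each known file, collect the ones that vanished, then close and evict their handles. A condensed sketch of that pattern (io.Closer stands in for *bbolt.DB, and the map is the caller's handle cache):

package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// closeRemoved closes and evicts handles whose backing file no longer
// exists on disk, mirroring the shape of BoltIndexClient.reload above.
func closeRemoved(dir string, open map[string]io.Closer) {
	var removed []string
	for name := range open {
		if _, err := os.Stat(filepath.Join(dir, name)); os.IsNotExist(err) {
			removed = append(removed, name)
		}
	}
	for _, name := range removed {
		if err := open[name].Close(); err != nil {
			fmt.Println("failed to close", name, err)
			continue // keep the entry so a later pass can retry
		}
		delete(open, name)
	}
}

func main() {
	closeRemoved(os.TempDir(), map[string]io.Closer{})
	fmt.Println("reload pass complete")
}

The two-phase structure (scan first, then close) keeps close errors out of the scan loop, matching the vendored code's handling.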
@@ -80,7 +80,7 @@ func (f *FSObjectClient) PutObject(_ context.Context, objectKey string, object i return err } - defer runutil.CloseWithLogOnErr(pkgUtil.Logger, fl, "fullPath: %s", fullPath) + defer runutil.CloseWithLogOnErr(util_log.Logger, fl, "fullPath: %s", fullPath) _, err = io.Copy(fl, object) if err != nil { @@ -187,7 +187,7 @@ func (f *FSObjectClient) DeleteObject(ctx context.Context, objectKey string) err func (f *FSObjectClient) DeleteChunksBefore(ctx context.Context, ts time.Time) error { return filepath.Walk(f.cfg.Directory, func(path string, info os.FileInfo, err error) error { if !info.IsDir() && info.ModTime().Before(ts) { - level.Info(pkgUtil.Logger).Log("msg", "file has exceeded the retention period, removing it", "filepath", info.Name()) + level.Info(util_log.Logger).Log("msg", "file has exceeded the retention period, removing it", "filepath", info.Name()) if err := os.Remove(path); err != nil { return err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/openstack/swift_object_client.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/openstack/swift_object_client.go index 7cab4748330..19935844c0e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/openstack/swift_object_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/openstack/swift_object_client.go @@ -9,10 +9,10 @@ import ( "io/ioutil" "github.com/ncw/swift" - thanos "github.com/thanos-io/thanos/pkg/objstore/swift" "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/util" + cortex_swift "github.com/cortexproject/cortex/pkg/storage/bucket/swift" + "github.com/cortexproject/cortex/pkg/util/log" ) type SwiftObjectClient struct { @@ -22,7 +22,7 @@ type SwiftObjectClient struct { // SwiftConfig is config for the Swift Chunk Client. type SwiftConfig struct { - thanos.SwiftConfig `yaml:",inline"` + cortex_swift.Config `yaml:",inline"` } // RegisterFlags registers flags. @@ -37,42 +37,30 @@ func (cfg *SwiftConfig) Validate() error { // RegisterFlagsWithPrefix registers flags with prefix. 
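Stepping back to the fs_object_client.go DeleteChunksBefore hunk above: filesystem retention is implemented as a tree walk that removes regular files older than the cutoff. A minimal sketch of the same shape (directory and retention period are made-up values; unlike the vendored code, this version also propagates walk errors):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// deleteOlderThan removes regular files under dir whose mtime is
// before cutoff, the same walk-and-remove shape as DeleteChunksBefore.
func deleteOlderThan(dir string, cutoff time.Time) error {
	return filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() && info.ModTime().Before(cutoff) {
			fmt.Println("removing expired file:", path)
			return os.Remove(path)
		}
		return nil
	})
}

func main() {
	// Hypothetical 7-day retention over ./chunks.
	if err := deleteOlderThan("./chunks", time.Now().Add(-7*24*time.Hour)); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}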
func (cfg *SwiftConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - f.StringVar(&cfg.ContainerName, prefix+"swift.container-name", "cortex", "Name of the Swift container to put chunks in.") - f.StringVar(&cfg.DomainName, prefix+"swift.domain-name", "", "Openstack user's domain name.") - f.StringVar(&cfg.DomainId, prefix+"swift.domain-id", "", "Openstack user's domain id.") - f.StringVar(&cfg.UserDomainName, prefix+"swift.user-domain-name", "", "Openstack user's domain name.") - f.StringVar(&cfg.UserDomainID, prefix+"swift.user-domain-id", "", "Openstack user's domain id.") - f.StringVar(&cfg.Username, prefix+"swift.username", "", "Openstack username for the api.") - f.StringVar(&cfg.UserId, prefix+"swift.user-id", "", "Openstack userid for the api.") - f.StringVar(&cfg.Password, prefix+"swift.password", "", "Openstack api key.") - f.StringVar(&cfg.AuthUrl, prefix+"swift.auth-url", "", "Openstack authentication URL.") - f.StringVar(&cfg.RegionName, prefix+"swift.region-name", "", "Openstack Region to use eg LON, ORD - default is use first region (v2,v3 auth only)") - f.StringVar(&cfg.ProjectName, prefix+"swift.project-name", "", "Openstack project name (v2,v3 auth only).") - f.StringVar(&cfg.ProjectID, prefix+"swift.project-id", "", "Openstack project id (v2,v3 auth only).") - f.StringVar(&cfg.ProjectDomainName, prefix+"swift.project-domain-name", "", "Name of the project's domain (v3 auth only), only needed if it differs from the user domain.") - f.StringVar(&cfg.ProjectDomainID, prefix+"swift.project-domain-id", "", "Id of the project's domain (v3 auth only), only needed if it differs the from user domain.") + cfg.Config.RegisterFlagsWithPrefix(prefix, f) } // NewSwiftObjectClient makes a new chunk.Client that writes chunks to OpenStack Swift. 
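The Swift hunk above replaces fourteen hand-registered flags with a single delegation to the embedded cortex_swift.Config, so the flag surface is defined once upstream. A minimal sketch of that embed-and-delegate pattern (the types here are stand-ins, not the real Cortex ones):

package main

import (
	"flag"
	"fmt"
)

// inner plays the role of the embedded cortex_swift.Config.
type inner struct {
	AuthURL string
}

func (c *inner) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
	f.StringVar(&c.AuthURL, prefix+"auth-url", "", "Authentication URL.")
}

// outer plays the role of SwiftConfig: it embeds the upstream config
// and forwards registration, so flag names stay stable while the
// field definitions live in one place.
type outer struct {
	inner `yaml:",inline"`
}

func (c *outer) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
	c.inner.RegisterFlagsWithPrefix(prefix, f)
}

func main() {
	var cfg outer
	fs := flag.NewFlagSet("example", flag.ContinueOnError)
	cfg.RegisterFlagsWithPrefix("swift.", fs)
	_ = fs.Parse([]string{"-swift.auth-url", "https://keystone.example/v3"})
	fmt.Println(cfg.AuthURL)
}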
func NewSwiftObjectClient(cfg SwiftConfig) (*SwiftObjectClient, error) { - util.WarnExperimentalUse("OpenStack Swift Storage") + log.WarnExperimentalUse("OpenStack Swift Storage") // Create a connection c := &swift.Connection{ - AuthUrl: cfg.AuthUrl, - ApiKey: cfg.Password, - UserName: cfg.Username, - UserId: cfg.UserId, - + AuthVersion: cfg.AuthVersion, + AuthUrl: cfg.AuthURL, + ApiKey: cfg.Password, + UserName: cfg.Username, + UserId: cfg.UserID, + Retries: cfg.MaxRetries, + ConnectTimeout: cfg.ConnectTimeout, + Timeout: cfg.RequestTimeout, TenantId: cfg.ProjectID, Tenant: cfg.ProjectName, TenantDomain: cfg.ProjectDomainName, TenantDomainId: cfg.ProjectDomainID, - - Domain: cfg.DomainName, - DomainId: cfg.DomainId, - - Region: cfg.RegionName, + Domain: cfg.DomainName, + DomainId: cfg.DomainID, + Region: cfg.RegionName, } switch { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/blocks_purger_api.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/blocks_purger_api.go index 543865739dc..930eb24c4e7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/blocks_purger_api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/blocks_purger_api.go @@ -4,6 +4,7 @@ import ( "context" "net/http" "strings" + "time" "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" @@ -44,7 +45,7 @@ func (api *BlocksPurgerAPI) DeleteTenant(w http.ResponseWriter, r *http.Request) return } - err = cortex_tsdb.WriteTenantDeletionMark(r.Context(), api.bucketClient, userID) + err = cortex_tsdb.WriteTenantDeletionMark(r.Context(), api.bucketClient, userID, cortex_tsdb.NewTenantDeletionMark(time.Now())) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -58,8 +59,8 @@ func (api *BlocksPurgerAPI) DeleteTenant(w http.ResponseWriter, r *http.Request) type DeleteTenantStatusResponse struct { TenantID string `json:"tenant_id"` BlocksDeleted bool `json:"blocks_deleted"` - RuleGroupsDeleted bool `json:"rule_groups_deleted"` - AlertManagerConfigDeleted bool `json:"alert_manager_config_deleted"` + RuleGroupsDeleted bool `json:"rule_groups_deleted,omitempty"` // Not yet supported. + AlertManagerConfigDeleted bool `json:"alert_manager_config_deleted,omitempty"` // Not yet supported. 
} func (api *BlocksPurgerAPI) DeleteTenantStatus(w http.ResponseWriter, r *http.Request) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go index 5e7131a587d..42a222803fc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/purger.go @@ -22,7 +22,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -140,7 +140,7 @@ type Purger struct { // NewPurger creates a new Purger func NewPurger(cfg Config, deleteStore *DeleteStore, chunkStore chunk.Store, storageClient chunk.ObjectClient, registerer prometheus.Registerer) (*Purger, error) { - util.WarnExperimentalUse("Delete series API") + util_log.WarnExperimentalUse("Delete series API") purger := Purger{ cfg: cfg, @@ -180,7 +180,7 @@ func (p *Purger) loop(ctx context.Context) error { err := p.pullDeleteRequestsToPlanDeletes() if err != nil { status = statusFail - level.Error(util.Logger).Log("msg", "error pulling delete requests for building plans", "err", err) + level.Error(util_log.Logger).Log("msg", "error pulling delete requests for building plans", "err", err) } p.metrics.loadPendingRequestsAttempsTotal.WithLabelValues(status).Inc() @@ -221,14 +221,14 @@ func (p *Purger) retryFailedRequests() { for _, userID := range userIDsWithFailedRequest { deleteRequest := p.inProcessRequests.get(userID) if deleteRequest == nil { - level.Error(util.Logger).Log("msg", "expected an in-process delete request", "user", userID) + level.Error(util_log.Logger).Log("msg", "expected an in-process delete request", "user", userID) continue } p.inProcessRequests.unsetFailedRequestForUser(userID) err := p.resumeStalledRequest(*deleteRequest) if err != nil { - reqWithLogger := makeDeleteRequestWithLogger(*deleteRequest, util.Logger) + reqWithLogger := makeDeleteRequestWithLogger(*deleteRequest, util_log.Logger) level.Error(reqWithLogger.logger).Log("msg", "failed to resume failed request", "err", err) } } @@ -407,7 +407,7 @@ func (p *Purger) loadInprocessDeleteRequests() error { for i := range inprocessRequests { deleteRequest := inprocessRequests[i] p.inProcessRequests.set(deleteRequest.UserID, &deleteRequest) - req := makeDeleteRequestWithLogger(deleteRequest, util.Logger) + req := makeDeleteRequestWithLogger(deleteRequest, util_log.Logger) level.Info(req.logger).Log("msg", "resuming in process delete requests", "status", deleteRequest.Status) err = p.resumeStalledRequest(deleteRequest) @@ -421,7 +421,7 @@ func (p *Purger) loadInprocessDeleteRequests() error { } func (p *Purger) resumeStalledRequest(deleteRequest DeleteRequest) error { - req := makeDeleteRequestWithLogger(deleteRequest, util.Logger) + req := makeDeleteRequestWithLogger(deleteRequest, util_log.Logger) if deleteRequest.Status == StatusBuildingPlan { err := p.buildDeletePlan(req) @@ -479,7 +479,7 @@ func (p *Purger) pullDeleteRequestsToPlanDeletes() error { p.usersWithPendingRequests[deleteRequest.UserID] = struct{}{} p.usersWithPendingRequestsMtx.Unlock() - level.Debug(util.Logger).Log("msg", "skipping delete request processing for now since another request from same user is already in process", + level.Debug(util_log.Logger).Log("msg", "skipping delete request processing for now since another request from 
same user is already in process", "inprocess_request_id", inprocessDeleteRequest.RequestID, "skipped_request_id", deleteRequest.RequestID, "user_id", deleteRequest.UserID) continue @@ -492,7 +492,7 @@ func (p *Purger) pullDeleteRequestsToPlanDeletes() error { deleteRequest.Status = StatusBuildingPlan p.inProcessRequests.set(deleteRequest.UserID, &deleteRequest) - req := makeDeleteRequestWithLogger(deleteRequest, util.Logger) + req := makeDeleteRequestWithLogger(deleteRequest, util_log.Logger) level.Info(req.logger).Log("msg", "building plan for a new delete request") diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go index 0799716afad..d8fc70d788d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/request_handler.go @@ -15,6 +15,7 @@ import ( "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) type deleteRequestHandlerMetrics struct { @@ -107,7 +108,7 @@ func (dm *DeleteRequestHandler) AddDeleteRequestHandler(w http.ResponseWriter, r } if err := dm.deleteStore.AddDeleteRequest(ctx, userID, model.Time(startTime), model.Time(endTime), match); err != nil { - level.Error(util.Logger).Log("msg", "error adding delete request to the store", "err", err) + level.Error(util_log.Logger).Log("msg", "error adding delete request to the store", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -127,13 +128,13 @@ func (dm *DeleteRequestHandler) GetAllDeleteRequestsHandler(w http.ResponseWrite deleteRequests, err := dm.deleteStore.GetAllDeleteRequestsForUser(ctx, userID) if err != nil { - level.Error(util.Logger).Log("msg", "error getting delete requests from the store", "err", err) + level.Error(util_log.Logger).Log("msg", "error getting delete requests from the store", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } if err := json.NewEncoder(w).Encode(deleteRequests); err != nil { - level.Error(util.Logger).Log("msg", "error marshalling response", "err", err) + level.Error(util_log.Logger).Log("msg", "error marshalling response", "err", err) http.Error(w, fmt.Sprintf("Error marshalling response: %v", err), http.StatusInternalServerError) } } @@ -152,7 +153,7 @@ func (dm *DeleteRequestHandler) CancelDeleteRequestHandler(w http.ResponseWriter deleteRequest, err := dm.deleteStore.GetDeleteRequest(ctx, userID, requestID) if err != nil { - level.Error(util.Logger).Log("msg", "error getting delete request from the store", "err", err) + level.Error(util_log.Logger).Log("msg", "error getting delete request from the store", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -173,7 +174,7 @@ func (dm *DeleteRequestHandler) CancelDeleteRequestHandler(w http.ResponseWriter } if err := dm.deleteStore.RemoveDeleteRequest(ctx, userID, requestID, deleteRequest.CreatedAt, deleteRequest.StartTime, deleteRequest.EndTime); err != nil { - level.Error(util.Logger).Log("msg", "error cancelling the delete request", "err", err) + level.Error(util_log.Logger).Log("msg", "error cancelling the delete request", "err", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go 
b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go index 1f1ad1b5bec..fdf2cc0914d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/purger/tombstones.go @@ -3,6 +3,7 @@ package purger import ( "context" "sort" + "strconv" "sync" "time" @@ -14,7 +15,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/promql/parser" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const tombstonesReloadDuration = 5 * time.Minute @@ -96,7 +97,7 @@ func (tl *TombstonesLoader) loop() { case <-tombstonesReloadTimer.C: err := tl.reloadTombstones() if err != nil { - level.Error(util.Logger).Log("msg", "error reloading tombstones", "err", err) + level.Error(util_log.Logger).Log("msg", "error reloading tombstones", "err", err) } case <-tl.quit: return @@ -246,14 +247,64 @@ func (tl *TombstonesLoader) loadPendingTombstones(userID string) error { } // GetStoreCacheGenNumber returns store cache gen number for a user -func (tl *TombstonesLoader) GetStoreCacheGenNumber(userID string) string { - return tl.getCacheGenNumbers(userID).store - +func (tl *TombstonesLoader) GetStoreCacheGenNumber(tenantIDs []string) string { + return tl.getCacheGenNumbersPerTenants(tenantIDs).store } // GetResultsCacheGenNumber returns results cache gen number for a user -func (tl *TombstonesLoader) GetResultsCacheGenNumber(userID string) string { - return tl.getCacheGenNumbers(userID).results +func (tl *TombstonesLoader) GetResultsCacheGenNumber(tenantIDs []string) string { + return tl.getCacheGenNumbersPerTenants(tenantIDs).results +} + +func (tl *TombstonesLoader) getCacheGenNumbersPerTenants(tenantIDs []string) *cacheGenNumbers { + var result cacheGenNumbers + + if len(tenantIDs) == 0 { + return &result + } + + // keep the maximum value that's currently in result + var maxResults, maxStore int + + for pos, tenantID := range tenantIDs { + numbers := tl.getCacheGenNumbers(tenantID) + + // handle first tenant in the list + if pos == 0 { + // short cut if there is only one tenant + if len(tenantIDs) == 1 { + return numbers + } + + // set first tenant string whatever happens next + result.results = numbers.results + result.store = numbers.store + } + + // set results number string if it's higher than the ones before + if numbers.results != "" { + results, err := strconv.Atoi(numbers.results) + if err != nil { + level.Error(util_log.Logger).Log("msg", "error parsing resultsCacheGenNumber", "user", tenantID, "err", err) + } else if maxResults < results { + maxResults = results + result.results = numbers.results + } + } + + // set store number string if it's higher than the ones before + if numbers.store != "" { + store, err := strconv.Atoi(numbers.store) + if err != nil { + level.Error(util_log.Logger).Log("msg", "error parsing storeCacheGenNumber", "user", tenantID, "err", err) + } else if maxStore < store { + maxStore = store + result.store = numbers.store + } + } + } + + return &result } func (tl *TombstonesLoader) getCacheGenNumbers(userID string) *cacheGenNumbers { @@ -275,7 +326,7 @@ func (tl *TombstonesLoader) getCacheGenNumbers(userID string) *cacheGenNumbers { genNumbers, err := tl.deleteStore.getCacheGenerationNumbers(context.Background(), userID) if err != nil { - level.Error(util.Logger).Log("msg", "error loading cache generation numbers", "err", err) + level.Error(util_log.Logger).Log("msg", "error loading cache generation 
numbers", "err", err) tl.metrics.cacheGenLoadFailures.Inc() return &cacheGenNumbers{} } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go index 441fc5f84cd..6f6cc8f4d30 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema.go @@ -14,7 +14,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/cortexproject/cortex/pkg/querier/astmapper" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const ( @@ -882,7 +882,7 @@ func (v10Entries) FilterReadQueries(queries []IndexQuery, shard *astmapper.Shard s := strings.Split(query.HashValue, ":")[0] n, err := strconv.Atoi(s) if err != nil { - level.Error(util.Logger).Log( + level.Error(util_log.Logger).Log( "msg", "Unable to determine shard from IndexQuery", "HashValue", diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go index 9a37bb0c61e..ce37df393b3 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/schema_config.go @@ -13,7 +13,8 @@ import ( "github.com/weaveworks/common/mtime" yaml "gopkg.in/yaml.v2" - "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/math" ) const ( @@ -260,8 +261,8 @@ func (cfg *PeriodConfig) hourlyBuckets(from, through model.Time, userID string) ) for i := fromHour; i <= throughHour; i++ { - relativeFrom := util.Max64(0, int64(from)-(i*millisecondsInHour)) - relativeThrough := util.Min64(millisecondsInHour, int64(through)-(i*millisecondsInHour)) + relativeFrom := math.Max64(0, int64(from)-(i*millisecondsInHour)) + relativeThrough := math.Min64(millisecondsInHour, int64(through)-(i*millisecondsInHour)) result = append(result, Bucket{ from: uint32(relativeFrom), through: uint32(relativeThrough), @@ -291,8 +292,8 @@ func (cfg *PeriodConfig) dailyBuckets(from, through model.Time, userID string) [ // include in the range keys - we use a uint32 - as we then have to base 32 // encode it. 
- relativeFrom := util.Max64(0, int64(from)-(i*millisecondsInDay)) - relativeThrough := util.Min64(millisecondsInDay, int64(through)-(i*millisecondsInDay)) + relativeFrom := math.Max64(0, int64(from)-(i*millisecondsInDay)) + relativeThrough := math.Min64(millisecondsInDay, int64(through)-(i*millisecondsInDay)) result = append(result, Bucket{ from: uint32(relativeFrom), through: uint32(relativeThrough), @@ -394,7 +395,7 @@ func (cfg *PeriodicTableConfig) periodicTables(from, through model.Time, pCfg Pr if (i*periodSecs)-beginGraceSecs <= now && now < (i*periodSecs)+periodSecs+endGraceSecs { table = pCfg.ActiveTableProvisionConfig.BuildTableDesc(tableName, cfg.Tags) - level.Debug(util.Logger).Log("msg", "Table is Active", + level.Debug(log.Logger).Log("msg", "Table is Active", "tableName", table.Name, "provisionedRead", table.ProvisionedRead, "provisionedWrite", table.ProvisionedWrite, @@ -409,7 +410,7 @@ func (cfg *PeriodicTableConfig) periodicTables(from, through model.Time, pCfg Pr disableAutoscale := i < (nowWeek - pCfg.InactiveWriteScaleLastN) table = pCfg.InactiveTableProvisionConfig.BuildTableDesc(tableName, cfg.Tags, disableAutoscale) - level.Debug(util.Logger).Log("msg", "Table is Inactive", + level.Debug(log.Logger).Log("msg", "Table is Inactive", "tableName", table.Name, "provisionedRead", table.ProvisionedRead, "provisionedWrite", table.ProvisionedWrite, diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go index 6097b434b10..310404cd133 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/storage/factory.go @@ -23,7 +23,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk/objectclient" "github.com/cortexproject/cortex/pkg/chunk/openstack" "github.com/cortexproject/cortex/pkg/chunk/purger" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // Supported storage engines @@ -105,7 +105,7 @@ func (cfg *Config) Validate() error { if err := cfg.CassandraStorageConfig.Validate(); err != nil { return errors.Wrap(err, "invalid Cassandra Storage config") } - if err := cfg.GCPStorageConfig.Validate(util.Logger); err != nil { + if err := cfg.GCPStorageConfig.Validate(util_log.Logger); err != nil { return errors.Wrap(err, "invalid GCP Storage Storage config") } if err := cfg.Swift.Validate(); err != nil { @@ -222,7 +222,7 @@ func NewIndexClient(name string, cfg Config, schemaCfg chunk.SchemaConfig, regis } path := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, "/") if len(path) > 0 { - level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) + level.Warn(util_log.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) } return aws.NewDynamoDBIndexClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg, registerer) case "gcp": @@ -256,7 +256,7 @@ func NewChunkClient(name string, cfg Config, schemaCfg chunk.SchemaConfig, regis } path := strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, "/") if len(path) > 0 { - level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) + level.Warn(util_log.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) } return aws.NewDynamoDBChunkClient(cfg.AWSStorageConfig.DynamoDBConfig, schemaCfg, registerer) case "azure": @@ -308,7 +308,7 @@ func NewTableClient(name string, cfg Config, registerer prometheus.Registerer) ( } path := 
strings.TrimPrefix(cfg.AWSStorageConfig.DynamoDB.URL.Path, "/") if len(path) > 0 { - level.Warn(util.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) + level.Warn(util_log.Logger).Log("msg", "ignoring DynamoDB URL path", "path", path) } return aws.NewDynamoDBTableClient(cfg.AWSStorageConfig.DynamoDBConfig, registerer) case "gcp", "gcp-columnkey", "bigtable", "bigtable-hashed": diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go index eda8a83f753..c4f46830471 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/table_manager.go @@ -18,7 +18,7 @@ import ( "github.com/weaveworks/common/instrument" "github.com/weaveworks/common/mtime" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -215,7 +215,7 @@ func (m *TableManager) loop(ctx context.Context) error { if err := instrument.CollectedRequest(context.Background(), "TableManager.SyncTables", instrument.NewHistogramCollector(m.metrics.syncTableDuration), instrument.ErrorCode, func(ctx context.Context) error { return m.SyncTables(ctx) }); err != nil { - level.Error(util.Logger).Log("msg", "error syncing tables", "err", err) + level.Error(util_log.Logger).Log("msg", "error syncing tables", "err", err) } // Sleep for a bit to spread the sync load across different times if the tablemanagers are all started at once. @@ -231,7 +231,7 @@ func (m *TableManager) loop(ctx context.Context) error { if err := instrument.CollectedRequest(context.Background(), "TableManager.SyncTables", instrument.NewHistogramCollector(m.metrics.syncTableDuration), instrument.ErrorCode, func(ctx context.Context) error { return m.SyncTables(ctx) }); err != nil { - level.Error(util.Logger).Log("msg", "error syncing tables", "err", err) + level.Error(util_log.Logger).Log("msg", "error syncing tables", "err", err) } case <-ctx.Done(): return nil @@ -254,7 +254,7 @@ func (m *TableManager) checkAndCreateExtraTables() error { for _, tableDesc := range extraTables.Tables { if _, ok := existingTablesMap[tableDesc.Name]; !ok { // creating table - level.Info(util.Logger).Log("msg", "creating extra table", + level.Info(util_log.Logger).Log("msg", "creating extra table", "tableName", tableDesc.Name, "provisionedRead", tableDesc.ProvisionedRead, "provisionedWrite", tableDesc.ProvisionedWrite, @@ -272,7 +272,7 @@ func (m *TableManager) checkAndCreateExtraTables() error { continue } - level.Info(util.Logger).Log("msg", "checking throughput of extra table", "table", tableDesc.Name) + level.Info(util_log.Logger).Log("msg", "checking throughput of extra table", "table", tableDesc.Name) // table already exists, lets check actual throughput for tables is same as what is in configurations, if not let us update it current, _, err := extraTables.TableClient.DescribeTable(context.Background(), tableDesc.Name) if err != nil { @@ -280,7 +280,7 @@ func (m *TableManager) checkAndCreateExtraTables() error { } if !current.Equals(tableDesc) { - level.Info(util.Logger).Log("msg", "updating throughput of extra table", + level.Info(util_log.Logger).Log("msg", "updating throughput of extra table", "table", tableDesc.Name, "tableName", tableDesc.Name, "provisionedRead", tableDesc.ProvisionedRead, @@ -305,7 +305,7 @@ func (m *TableManager) bucketRetentionIteration(ctx context.Context) error { err := 
m.bucketClient.DeleteChunksBefore(ctx, mtime.Now().Add(-m.cfg.RetentionPeriod)) if err != nil { - level.Error(util.Logger).Log("msg", "error enforcing filesystem retention", "err", err) + level.Error(util_log.Logger).Log("msg", "error enforcing filesystem retention", "err", err) } // don't return error, otherwise timer service would stop. @@ -321,7 +321,7 @@ func (m *TableManager) SyncTables(ctx context.Context) error { } expected := m.calculateExpectedTables() - level.Info(util.Logger).Log("msg", "synching tables", "expected_tables", len(expected)) + level.Info(util_log.Logger).Log("msg", "synching tables", "expected_tables", len(expected)) toCreate, toCheckThroughput, toDelete, err := m.partitionTables(ctx, expected) if err != nil { @@ -473,7 +473,7 @@ func (m *TableManager) createTables(ctx context.Context, descriptions []TableDes merr := tsdb_errors.NewMulti() for _, desc := range descriptions { - level.Info(util.Logger).Log("msg", "creating table", "table", desc.Name) + level.Info(util_log.Logger).Log("msg", "creating table", "table", desc.Name) err := m.client.CreateTable(ctx, desc) if err != nil { numFailures++ @@ -490,12 +490,12 @@ func (m *TableManager) deleteTables(ctx context.Context, descriptions []TableDes merr := tsdb_errors.NewMulti() for _, desc := range descriptions { - level.Info(util.Logger).Log("msg", "table has exceeded the retention period", "table", desc.Name) + level.Info(util_log.Logger).Log("msg", "table has exceeded the retention period", "table", desc.Name) if !m.cfg.RetentionDeletesEnabled { continue } - level.Info(util.Logger).Log("msg", "deleting table", "table", desc.Name) + level.Info(util_log.Logger).Log("msg", "deleting table", "table", desc.Name) err := m.client.DeleteTable(ctx, desc.Name) if err != nil { numFailures++ @@ -509,7 +509,7 @@ func (m *TableManager) deleteTables(ctx context.Context, descriptions []TableDes func (m *TableManager) updateTables(ctx context.Context, descriptions []TableDesc) error { for _, expected := range descriptions { - level.Debug(util.Logger).Log("msg", "checking provisioned throughput on table", "table", expected.Name) + level.Debug(util_log.Logger).Log("msg", "checking provisioned throughput on table", "table", expected.Name) current, isActive, err := m.client.DescribeTable(ctx, expected.Name) if err != nil { return err @@ -523,12 +523,12 @@ func (m *TableManager) updateTables(ctx context.Context, descriptions []TableDes } if !isActive { - level.Info(util.Logger).Log("msg", "skipping update on table, not yet ACTIVE", "table", expected.Name) + level.Info(util_log.Logger).Log("msg", "skipping update on table, not yet ACTIVE", "table", expected.Name) continue } if expected.Equals(current) { - level.Info(util.Logger).Log("msg", "provisioned throughput on table, skipping", "table", current.Name, "read", current.ProvisionedRead, "write", current.ProvisionedWrite) + level.Info(util_log.Logger).Log("msg", "provisioned throughput on table, skipping", "table", current.Name, "read", current.ProvisionedRead, "write", current.ProvisionedWrite) continue } diff --git a/vendor/github.com/cortexproject/cortex/pkg/chunk/util/util.go b/vendor/github.com/cortexproject/cortex/pkg/chunk/util/util.go index b6af3444589..3241d749436 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/chunk/util/util.go +++ b/vendor/github.com/cortexproject/cortex/pkg/chunk/util/util.go @@ -11,7 +11,7 @@ import ( ot "github.com/opentracing/opentracing-go" "github.com/cortexproject/cortex/pkg/chunk" - "github.com/cortexproject/cortex/pkg/util" + 
"github.com/cortexproject/cortex/pkg/util/math" ) // Callback from an IndexQuery. @@ -36,7 +36,7 @@ func DoParallelQueries( queue := make(chan chunk.IndexQuery) incomingErrors := make(chan error) - n := util.Min(len(queries), QueryParallelism) + n := math.Min(len(queries), QueryParallelism) // Run n parallel goroutines fetching queries from the queue for i := 0; i < n; i++ { go func() { diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/blocks_cleaner.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/blocks_cleaner.go index c6ddf87402e..0742b0b4e64 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/blocks_cleaner.go +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/blocks_cleaner.go @@ -2,7 +2,6 @@ package compactor import ( "context" - "path" "time" "github.com/go-kit/kit/log" @@ -13,22 +12,23 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" - "github.com/thanos-io/thanos/pkg/compact" "github.com/thanos-io/thanos/pkg/objstore" "github.com/cortexproject/cortex/pkg/storage/bucket" cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb" + "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/concurrency" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) type BlocksCleanerConfig struct { - DataDir string - MetaSyncConcurrency int - DeletionDelay time.Duration - CleanupInterval time.Duration - CleanupConcurrency int + DeletionDelay time.Duration + CleanupInterval time.Duration + CleanupConcurrency int + BlockDeletionMarksMigrationEnabled bool // TODO Discuss whether we should remove it in Cortex 1.8.0 and document that upgrading to 1.7.0 before 1.8.0 is required. + TenantCleanupDelay time.Duration // Delay before removing tenant deletion mark and "debug". } type BlocksCleaner struct { @@ -39,13 +39,20 @@ type BlocksCleaner struct { bucketClient objstore.Bucket usersScanner *cortex_tsdb.UsersScanner + // Keep track of the last owned users. + lastOwnedUsers []string + // Metrics. - runsStarted prometheus.Counter - runsCompleted prometheus.Counter - runsFailed prometheus.Counter - runsLastSuccess prometheus.Gauge - blocksCleanedTotal prometheus.Counter - blocksFailedTotal prometheus.Counter + runsStarted prometheus.Counter + runsCompleted prometheus.Counter + runsFailed prometheus.Counter + runsLastSuccess prometheus.Gauge + blocksCleanedTotal prometheus.Counter + blocksFailedTotal prometheus.Counter + tenantBlocks *prometheus.GaugeVec + tenantMarkedBlocks *prometheus.GaugeVec + tenantPartialBlocks *prometheus.GaugeVec + tenantBucketIndexLastUpdate *prometheus.GaugeVec } func NewBlocksCleaner(cfg BlocksCleanerConfig, bucketClient objstore.Bucket, usersScanner *cortex_tsdb.UsersScanner, logger log.Logger, reg prometheus.Registerer) *BlocksCleaner { @@ -78,6 +85,26 @@ func NewBlocksCleaner(cfg BlocksCleanerConfig, bucketClient objstore.Bucket, use Name: "cortex_compactor_block_cleanup_failures_total", Help: "Total number of blocks failed to be deleted.", }), + + // The following metrics don't have the "cortex_compactor" prefix because not strictly related to + // the compactor. They're just tracked by the compactor because it's the most logical place where these + // metrics can be tracked. 
+ tenantBlocks: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ + Name: "cortex_bucket_blocks_count", + Help: "Total number of blocks in the bucket. Includes blocks marked for deletion, but not partial blocks.", + }, []string{"user"}), + tenantMarkedBlocks: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ + Name: "cortex_bucket_blocks_marked_for_deletion_count", + Help: "Total number of blocks marked for deletion in the bucket.", + }, []string{"user"}), + tenantPartialBlocks: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ + Name: "cortex_bucket_blocks_partials_count", + Help: "Total number of partial blocks.", + }, []string{"user"}), + tenantBucketIndexLastUpdate: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{ + Name: "cortex_bucket_index_last_successful_update_timestamp_seconds", + Help: "Timestamp of the last successful update of a tenant's bucket index.", + }, []string{"user"}), } c.Service = services.NewTimerService(cfg.CleanupInterval, c.starting, c.ticker, nil) @@ -88,62 +115,80 @@ func NewBlocksCleaner(cfg BlocksCleanerConfig, bucketClient objstore.Bucket, use func (c *BlocksCleaner) starting(ctx context.Context) error { // Run a cleanup so that any other service depending on this service // is guaranteed to start once the initial cleanup has been done. - c.runCleanup(ctx) + c.runCleanup(ctx, true) return nil } func (c *BlocksCleaner) ticker(ctx context.Context) error { - c.runCleanup(ctx) + c.runCleanup(ctx, false) return nil } -func (c *BlocksCleaner) runCleanup(ctx context.Context) { - level.Info(c.logger).Log("msg", "started hard deletion of blocks marked for deletion, and blocks for tenants marked for deletion") +func (c *BlocksCleaner) runCleanup(ctx context.Context, firstRun bool) { + level.Info(c.logger).Log("msg", "started blocks cleanup and maintenance") c.runsStarted.Inc() - if err := c.cleanUsers(ctx); err == nil { - level.Info(c.logger).Log("msg", "successfully completed hard deletion of blocks marked for deletion, and blocks for tenants marked for deletion") + if err := c.cleanUsers(ctx, firstRun); err == nil { + level.Info(c.logger).Log("msg", "successfully completed blocks cleanup and maintenance") c.runsCompleted.Inc() c.runsLastSuccess.SetToCurrentTime() } else if errors.Is(err, context.Canceled) { - level.Info(c.logger).Log("msg", "canceled hard deletion of blocks marked for deletion, and blocks for tenants marked for deletion", "err", err) + level.Info(c.logger).Log("msg", "canceled blocks cleanup and maintenance", "err", err) return } else { - level.Error(c.logger).Log("msg", "failed to hard delete blocks marked for deletion, and blocks for tenants marked for deletion", "err", err.Error()) + level.Error(c.logger).Log("msg", "failed to run blocks cleanup and maintenance", "err", err.Error()) c.runsFailed.Inc() } } -func (c *BlocksCleaner) cleanUsers(ctx context.Context) error { +func (c *BlocksCleaner) cleanUsers(ctx context.Context, firstRun bool) error { users, deleted, err := c.usersScanner.ScanUsers(ctx) if err != nil { return errors.Wrap(err, "failed to discover users from bucket") } - isDeleted := map[string]bool{} - for _, userID := range deleted { - isDeleted[userID] = true + isActive := util.StringsMap(users) + isDeleted := util.StringsMap(deleted) + allUsers := append(users, deleted...) + + // Delete per-tenant metrics for all tenants not belonging anymore to this shard. + // Such tenants have been moved to a different shard, so their updated metrics will + // be exported by the new shard. 
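The comment above introduces the shard-handover bookkeeping that the loop just below implements: per-tenant gauge series are deleted for tenants this replica no longer owns, so stale values do not linger after resharding. A small sketch of that GaugeVec lifecycle (prometheus/client_golang is the same library used throughout this patch; the tenant sets are hypothetical):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	blocks := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "bucket_blocks_count",
		Help: "Blocks per tenant.",
	}, []string{"user"})

	// Previous run owned both tenants; this run owns only tenant-a.
	lastOwned := []string{"tenant-a", "tenant-b"}
	owned := map[string]bool{"tenant-a": true}

	blocks.WithLabelValues("tenant-a").Set(12)
	blocks.WithLabelValues("tenant-b").Set(7)

	// Drop the series for tenants that moved to another shard, the
	// same DeleteLabelValues call the cleaner makes below.
	for _, user := range lastOwned {
		if !owned[user] {
			blocks.DeleteLabelValues(user)
		}
	}
	fmt.Println("stale per-tenant series removed")
}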
+ for _, userID := range c.lastOwnedUsers { + if !isActive[userID] && !isDeleted[userID] { + c.tenantBlocks.DeleteLabelValues(userID) + c.tenantMarkedBlocks.DeleteLabelValues(userID) + c.tenantPartialBlocks.DeleteLabelValues(userID) + c.tenantBucketIndexLastUpdate.DeleteLabelValues(userID) + } } + c.lastOwnedUsers = allUsers - allUsers := append(users, deleted...) return concurrency.ForEachUser(ctx, allUsers, c.cfg.CleanupConcurrency, func(ctx context.Context, userID string) error { if isDeleted[userID] { - return errors.Wrapf(c.deleteUser(ctx, userID), "failed to delete blocks for user marked for deletion: %s", userID) + return errors.Wrapf(c.deleteUserMarkedForDeletion(ctx, userID), "failed to delete user marked for deletion: %s", userID) } - return errors.Wrapf(c.cleanUser(ctx, userID), "failed to delete blocks for user: %s", userID) + return errors.Wrapf(c.cleanUser(ctx, userID, firstRun), "failed to delete blocks for user: %s", userID) }) } -// Remove all blocks for user marked for deletion. -func (c *BlocksCleaner) deleteUser(ctx context.Context, userID string) error { - userLogger := util.WithUserID(userID, c.logger) +// Remove blocks and remaining data for tenant marked for deletion. +func (c *BlocksCleaner) deleteUserMarkedForDeletion(ctx context.Context, userID string) error { + userLogger := util_log.WithUserID(userID, c.logger) userBucket := bucket.NewUserBucketClient(userID, c.bucketClient) - level.Info(userLogger).Log("msg", "deleting blocks for user marked for deletion") + level.Info(userLogger).Log("msg", "deleting blocks for tenant marked for deletion") - var deleted, failed int + // We immediately delete the bucket index, to signal to its consumers that + // the tenant has "no blocks" in the storage. + if err := bucketindex.DeleteIndex(ctx, c.bucketClient, userID); err != nil { + return err + } + c.tenantBucketIndexLastUpdate.DeleteLabelValues(userID) + + var deletedBlocks, failed int err := userBucket.Iter(ctx, "", func(name string) error { if err := ctx.Err(); err != nil { return err @@ -162,7 +207,7 @@ func (c *BlocksCleaner) deleteUser(ctx context.Context, userID string) error { return nil // Continue with other blocks. } - deleted++ + deletedBlocks++ c.blocksCleanedTotal.Inc() level.Info(userLogger).Log("msg", "deleted block", "block", id) return nil @@ -173,75 +218,154 @@ func (c *BlocksCleaner) deleteUser(ctx context.Context, userID string) error { } if failed > 0 { + // The number of blocks left in the storage is equal to the number of blocks we failed + // to delete. We also consider them all marked for deletion given the next run will try + // to delete them again. + c.tenantBlocks.WithLabelValues(userID).Set(float64(failed)) + c.tenantMarkedBlocks.WithLabelValues(userID).Set(float64(failed)) + c.tenantPartialBlocks.WithLabelValues(userID).Set(0) + return errors.Errorf("failed to delete %d blocks", failed) } - level.Info(userLogger).Log("msg", "finished deleting blocks for user marked for deletion", "deletedBlocks", deleted) + // Given all blocks have been deleted, we can also remove the metrics. 
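The per-tenant gauges follow the usual client_golang lifecycle, which is what the DeleteLabelValues calls below rely on: register once with a "user" label, Set on every cleanup pass, delete the series when the tenant is gone so stale values stop being exported. A minimal sketch using one of the metric names added above:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

func main() {
	reg := prometheus.NewRegistry()

	// Per-tenant gauge labelled by "user", as in the new cleaner metrics.
	tenantBlocks := promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
		Name: "cortex_bucket_blocks_count",
		Help: "Total number of blocks in the bucket.",
	}, []string{"user"})

	tenantBlocks.WithLabelValues("user-1").Set(42)

	// When the tenant is fully deleted (or moved to another shard), the
	// series is removed so a stale value is no longer exported.
	tenantBlocks.DeleteLabelValues("user-1")

	mfs, _ := reg.Gather()
	fmt.Println("metric families with samples:", len(mfs))
}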
+ c.tenantBlocks.DeleteLabelValues(userID) + c.tenantMarkedBlocks.DeleteLabelValues(userID) + c.tenantPartialBlocks.DeleteLabelValues(userID) + + if deletedBlocks > 0 { + level.Info(userLogger).Log("msg", "deleted blocks for tenant marked for deletion", "deletedBlocks", deletedBlocks) + } + + mark, err := cortex_tsdb.ReadTenantDeletionMark(ctx, c.bucketClient, userID) + if err != nil { + return errors.Wrap(err, "failed to read tenant deletion mark") + } + if mark == nil { + return errors.Wrap(err, "cannot find tenant deletion mark anymore") + } + + // If we have just deleted some blocks, update "finished" time. Also update "finished" time if it wasn't set yet, but there are no blocks. + // Note: this UPDATES the tenant deletion mark. Components that use caching bucket will NOT SEE this update, + // but that is fine -- they only check whether tenant deletion marker exists or not. + if deletedBlocks > 0 || mark.FinishedTime == 0 { + level.Info(userLogger).Log("msg", "updating finished time in tenant deletion mark") + mark.FinishedTime = time.Now().Unix() + return errors.Wrap(cortex_tsdb.WriteTenantDeletionMark(ctx, c.bucketClient, userID, mark), "failed to update tenant deletion mark") + } + + if time.Since(time.Unix(mark.FinishedTime, 0)) < c.cfg.TenantCleanupDelay { + return nil + } + + level.Info(userLogger).Log("msg", "cleaning up remaining blocks data for tenant marked for deletion") + + // Let's do final cleanup of tenant. + if deleted, err := bucket.DeletePrefix(ctx, userBucket, block.DebugMetas, userLogger); err != nil { + return errors.Wrap(err, "failed to delete "+block.DebugMetas) + } else if deleted > 0 { + level.Info(userLogger).Log("msg", "deleted files under "+block.DebugMetas+" for tenant marked for deletion", "count", deleted) + } + + // Tenant deletion mark file is inside Markers as well. + if deleted, err := bucket.DeletePrefix(ctx, userBucket, bucketindex.MarkersPathname, userLogger); err != nil { + return errors.Wrap(err, "failed to delete marker files") + } else if deleted > 0 { + level.Info(userLogger).Log("msg", "deleted marker files for tenant marked for deletion", "count", deleted) + } + return nil } -func (c *BlocksCleaner) cleanUser(ctx context.Context, userID string) error { - userLogger := util.WithUserID(userID, c.logger) +func (c *BlocksCleaner) cleanUser(ctx context.Context, userID string, firstRun bool) (returnErr error) { + userLogger := util_log.WithUserID(userID, c.logger) userBucket := bucket.NewUserBucketClient(userID, c.bucketClient) + startTime := time.Now() + + level.Info(userLogger).Log("msg", "started blocks cleanup and maintenance") + defer func() { + if returnErr != nil { + level.Warn(userLogger).Log("msg", "failed blocks cleanup and maintenance", "err", returnErr) + } else { + level.Info(userLogger).Log("msg", "completed blocks cleanup and maintenance", "duration", time.Since(startTime)) + } + }() + + // Migrate block deletion marks to the global markers location. This operation is a best-effort. 
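deleteUserMarkedForDeletion only performs the final cleanup of marker and debug files once TenantCleanupDelay has elapsed since FinishedTime was written in the tenant deletion mark. A small sketch of that gate, with a trimmed-down stand-in for the real mark type:

package main

import (
	"fmt"
	"time"
)

// tenantDeletionMark models just the field the cleaner needs here; the real
// type lives in the cortex_tsdb package and carries more fields.
type tenantDeletionMark struct {
	FinishedTime int64 // Unix seconds; 0 means deletion is not finished yet.
}

// shouldFinalCleanup reproduces the gate from deleteUserMarkedForDeletion:
// if blocks were just deleted or FinishedTime was never set, the mark is
// (re)written and cleanup waits; otherwise cleanup runs once the delay passed.
func shouldFinalCleanup(mark tenantDeletionMark, deletedBlocks int, delay time.Duration, now time.Time) bool {
	if deletedBlocks > 0 || mark.FinishedTime == 0 {
		return false // FinishedTime gets (re)set this run; wait for the delay.
	}
	return now.Sub(time.Unix(mark.FinishedTime, 0)) >= delay
}

func main() {
	mark := tenantDeletionMark{FinishedTime: time.Now().Add(-7 * time.Hour).Unix()}
	fmt.Println(shouldFinalCleanup(mark, 0, 6*time.Hour, time.Now())) // true
}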
+ if firstRun && c.cfg.BlockDeletionMarksMigrationEnabled { + if err := bucketindex.MigrateBlockDeletionMarksToGlobalLocation(ctx, c.bucketClient, userID); err != nil { + level.Warn(userLogger).Log("msg", "failed to migrate block deletion marks to the global markers location", "err", err) + } else { + level.Info(userLogger).Log("msg", "migrated block deletion marks to the global markers location") + } + } - ignoreDeletionMarkFilter := block.NewIgnoreDeletionMarkFilter(userLogger, userBucket, c.cfg.DeletionDelay, c.cfg.MetaSyncConcurrency) - - fetcher, err := block.NewMetaFetcher( - userLogger, - c.cfg.MetaSyncConcurrency, - userBucket, - // The fetcher stores cached metas in the "meta-syncer/" sub directory, - // but we prefix it in order to guarantee no clashing with the compactor. - path.Join(c.cfg.DataDir, "blocks-cleaner-meta-"+userID), - // No metrics. - nil, - []block.MetadataFilter{ignoreDeletionMarkFilter}, - nil, - ) - if err != nil { - return errors.Wrap(err, "error creating metadata fetcher") + // Read the bucket index. + idx, err := bucketindex.ReadIndex(ctx, c.bucketClient, userID, c.logger) + if errors.Is(err, bucketindex.ErrIndexCorrupted) { + level.Warn(userLogger).Log("msg", "found a corrupted bucket index, recreating it") + } else if err != nil && !errors.Is(err, bucketindex.ErrIndexNotFound) { + return err } - // Runs a bucket scan to get a fresh list of all blocks and populate - // the list of deleted blocks in filter. - _, partials, err := fetcher.Fetch(ctx) + // Generate an updated in-memory version of the bucket index. + w := bucketindex.NewUpdater(c.bucketClient, userID, c.logger) + idx, partials, err := w.UpdateIndex(ctx, idx) if err != nil { - return errors.Wrap(err, "error fetching metadata") + return err } - cleaner := compact.NewBlocksCleaner( - userLogger, - userBucket, - ignoreDeletionMarkFilter, - c.cfg.DeletionDelay, - c.blocksCleanedTotal, - c.blocksFailedTotal) + // Delete blocks marked for deletion. We iterate over a copy of deletion marks because + // we'll need to manipulate the index (removing blocks which get deleted). + for _, mark := range idx.BlockDeletionMarks.Clone() { + if time.Since(mark.GetDeletionTime()).Seconds() <= c.cfg.DeletionDelay.Seconds() { + continue + } - if err := cleaner.DeleteMarkedBlocks(ctx); err != nil { - return errors.Wrap(err, "error cleaning blocks") + if err := block.Delete(ctx, userLogger, userBucket, mark.ID); err != nil { + c.blocksFailedTotal.Inc() + level.Warn(userLogger).Log("msg", "failed to delete block marked for deletion", "block", mark.ID, "err", err) + continue + } + + // Remove the block from the bucket index too. + idx.RemoveBlock(mark.ID) + + c.blocksCleanedTotal.Inc() + level.Info(userLogger).Log("msg", "deleted block marked for deletion", "block", mark.ID) } // Partial blocks with a deletion mark can be cleaned up. This is a best effort, so we don't return // error if the cleanup of partial blocks fail. if len(partials) > 0 { - level.Info(userLogger).Log("msg", "started cleaning of partial blocks marked for deletion") - c.cleanUserPartialBlocks(ctx, partials, userBucket, userLogger) - level.Info(userLogger).Log("msg", "cleaning of partial blocks marked for deletion done") + c.cleanUserPartialBlocks(ctx, partials, idx, userBucket, userLogger) } + // Upload the updated index to the storage. 
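cleanUser now revolves around the bucket index: read it, regenerate it in memory with bucketindex.NewUpdater, delete blocks whose deletion marks are older than DeletionDelay, then write it back. A sketch of just the age filter, using a simplified stand-in for bucketindex.BlockDeletionMark:

package main

import (
	"fmt"
	"time"
)

// deletionMark models the bucket-index entry the cleaner iterates over; the
// real type is keyed by a ULID and read from the per-tenant bucket index.
type deletionMark struct {
	ID           string
	DeletionTime int64 // Unix seconds.
}

// blocksToDelete keeps only the marks old enough to act on, mirroring the
// time.Since(mark.GetDeletionTime()) <= DeletionDelay skip in cleanUser.
func blocksToDelete(marks []deletionMark, delay time.Duration, now time.Time) []deletionMark {
	var out []deletionMark
	for _, m := range marks {
		if now.Sub(time.Unix(m.DeletionTime, 0)) > delay {
			out = append(out, m)
		}
	}
	return out
}

func main() {
	now := time.Now()
	marks := []deletionMark{
		{ID: "old", DeletionTime: now.Add(-13 * time.Hour).Unix()},
		{ID: "new", DeletionTime: now.Add(-1 * time.Hour).Unix()},
	}
	fmt.Println(blocksToDelete(marks, 12*time.Hour, now)) // only "old"
}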
+ if err := bucketindex.WriteIndex(ctx, c.bucketClient, userID, idx); err != nil { + return err + } + + c.tenantBlocks.WithLabelValues(userID).Set(float64(len(idx.Blocks))) + c.tenantMarkedBlocks.WithLabelValues(userID).Set(float64(len(idx.BlockDeletionMarks))) + c.tenantPartialBlocks.WithLabelValues(userID).Set(float64(len(partials))) + c.tenantBucketIndexLastUpdate.WithLabelValues(userID).SetToCurrentTime() + return nil } -func (c *BlocksCleaner) cleanUserPartialBlocks(ctx context.Context, partials map[ulid.ULID]error, userBucket *bucket.UserBucketClient, userLogger log.Logger) { +// cleanUserPartialBlocks delete partial blocks which are safe to be deleted. The provided partials map +// is updated accordingly. +func (c *BlocksCleaner) cleanUserPartialBlocks(ctx context.Context, partials map[ulid.ULID]error, idx *bucketindex.Index, userBucket *bucket.UserBucketClient, userLogger log.Logger) { for blockID, blockErr := range partials { // We can safely delete only blocks which are partial because the meta.json is missing. - if blockErr != block.ErrorSyncMetaNotFound { + if !errors.Is(blockErr, bucketindex.ErrBlockMetaNotFound) { continue } // We can safely delete only partial blocks with a deletion mark. err := metadata.ReadMarker(ctx, userLogger, userBucket, blockID.String(), &metadata.DeletionMark{}) - if err == metadata.ErrorMarkerNotFound { + if errors.Is(err, metadata.ErrorMarkerNotFound) { continue } if err != nil { @@ -257,6 +381,10 @@ func (c *BlocksCleaner) cleanUserPartialBlocks(ctx context.Context, partials map continue } + // Remove the block from the bucket index too. + idx.RemoveBlock(blockID) + delete(partials, blockID) + c.blocksCleanedTotal.Inc() level.Info(userLogger).Log("msg", "deleted partial block marked for deletion", "block", blockID) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go index d52b776a839..78e2e9a6d68 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor.go @@ -16,7 +16,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/tsdb" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/compact" "github.com/thanos-io/thanos/pkg/compact/downsample" @@ -28,13 +27,55 @@ import ( "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) var ( errInvalidBlockRanges = "compactor block range periods should be divisible by the previous one, but %s is not divisible by %s" + RingOp = ring.NewOp([]ring.IngesterState{ring.ACTIVE}, nil) + + DefaultBlocksGrouperFactory = func(ctx context.Context, cfg Config, bkt objstore.Bucket, logger log.Logger, reg prometheus.Registerer, blocksMarkedForDeletion prometheus.Counter, garbageCollectedBlocks prometheus.Counter) compact.Grouper { + return compact.NewDefaultGrouper( + logger, + bkt, + false, // Do not accept malformed indexes + true, // Enable vertical compaction + reg, + blocksMarkedForDeletion, + garbageCollectedBlocks) + } + + DefaultBlocksCompactorFactory = func(ctx context.Context, cfg Config, logger log.Logger, reg prometheus.Registerer) 
(compact.Compactor, compact.Planner, error) { + compactor, err := tsdb.NewLeveledCompactor(ctx, reg, logger, cfg.BlockRanges.ToMilliseconds(), downsample.NewPool()) + if err != nil { + return nil, nil, err + } + + planner := compact.NewTSDBBasedPlanner(logger, cfg.BlockRanges.ToMilliseconds()) + return compactor, planner, nil + } ) +// BlocksGrouperFactory builds and returns the grouper to use to compact a tenant's blocks. +type BlocksGrouperFactory func( + ctx context.Context, + cfg Config, + bkt objstore.Bucket, + logger log.Logger, + reg prometheus.Registerer, + blocksMarkedForDeletion prometheus.Counter, + garbageCollectedBlocks prometheus.Counter, +) compact.Grouper + +// BlocksCompactorFactory builds and returns the compactor and planner to use to compact a tenant's blocks. +type BlocksCompactorFactory func( + ctx context.Context, + cfg Config, + logger log.Logger, + reg prometheus.Registerer, +) (compact.Compactor, compact.Planner, error) + // Config holds the Compactor config. type Config struct { BlockRanges cortex_tsdb.DurationList `yaml:"block_ranges"` @@ -45,8 +86,13 @@ type Config struct { CompactionInterval time.Duration `yaml:"compaction_interval"` CompactionRetries int `yaml:"compaction_retries"` CompactionConcurrency int `yaml:"compaction_concurrency"` + CleanupInterval time.Duration `yaml:"cleanup_interval"` CleanupConcurrency int `yaml:"cleanup_concurrency"` DeletionDelay time.Duration `yaml:"deletion_delay"` + TenantCleanupDelay time.Duration `yaml:"tenant_cleanup_delay"` + + // Whether the migration of block deletion marks to the global markers location is enabled. + BlockDeletionMarksMigrationEnabled bool `yaml:"block_deletion_marks_migration_enabled"` EnabledTenants flagext.StringSliceCSV `yaml:"enabled_tenants"` DisabledTenants flagext.StringSliceCSV `yaml:"disabled_tenants"` @@ -60,6 +106,10 @@ type Config struct { // it in tests. retryMinBackoff time.Duration `yaml:"-"` retryMaxBackoff time.Duration `yaml:"-"` + + // Allow downstream projects to customise the blocks compactor. + BlocksGrouperFactory BlocksGrouperFactory `yaml:"-"` + BlocksCompactorFactory BlocksCompactorFactory `yaml:"-"` } // RegisterFlags registers the Compactor flags. 
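Config gains BlocksGrouperFactory and BlocksCompactorFactory hooks, with nil falling back to the defaults, so downstream projects (such as the one this vendor update serves) can swap the compaction machinery. A dependency-free sketch of that injection pattern, with local interfaces standing in for the Thanos compact.Compactor and compact.Planner:

package main

import (
	"context"
	"fmt"
)

// Minimal stand-ins so the sketch compiles on its own; the real factories
// return the Thanos compact.Compactor and compact.Planner interfaces.
type compactor interface{ Compact() }
type planner interface{ Plan() }

type config struct {
	// Nil means "use the default", exactly like Config.BlocksCompactorFactory.
	blocksCompactorFactory func(ctx context.Context, cfg config) (compactor, planner, error)
}

type noopCompactor struct{}

func (noopCompactor) Compact() { fmt.Println("compacting") }

type noopPlanner struct{}

func (noopPlanner) Plan() { fmt.Println("planning") }

func defaultFactory(ctx context.Context, cfg config) (compactor, planner, error) {
	return noopCompactor{}, noopPlanner{}, nil
}

// newCompactor applies the same fallback the Cortex constructor does.
func newCompactor(cfg config) (compactor, planner, error) {
	f := cfg.blocksCompactorFactory
	if f == nil {
		f = defaultFactory
	}
	return f(context.Background(), cfg)
}

func main() {
	c, p, _ := newCompactor(config{})
	c.Compact()
	p.Plan()
}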
@@ -76,14 +126,16 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.MetaSyncConcurrency, "compactor.meta-sync-concurrency", 20, "Number of Go routines to use when syncing block meta files from the long term storage.") f.StringVar(&cfg.DataDir, "compactor.data-dir", "./data", "Data directory in which to cache blocks and process compactions") f.DurationVar(&cfg.CompactionInterval, "compactor.compaction-interval", time.Hour, "The frequency at which the compaction runs") - f.IntVar(&cfg.CompactionRetries, "compactor.compaction-retries", 3, "How many times to retry a failed compaction during a single compaction interval") + f.IntVar(&cfg.CompactionRetries, "compactor.compaction-retries", 3, "How many times to retry a failed compaction within a single compaction run.") f.IntVar(&cfg.CompactionConcurrency, "compactor.compaction-concurrency", 1, "Max number of concurrent compactions running.") - f.IntVar(&cfg.CleanupConcurrency, "compactor.cleanup-concurrency", 20, "Max number of tenants for which blocks should be cleaned up concurrently (deletion of blocks previously marked for deletion).") + f.DurationVar(&cfg.CleanupInterval, "compactor.cleanup-interval", 15*time.Minute, "How frequently compactor should run blocks cleanup and maintenance, as well as update the bucket index.") + f.IntVar(&cfg.CleanupConcurrency, "compactor.cleanup-concurrency", 20, "Max number of tenants for which blocks cleanup and maintenance should run concurrently.") f.BoolVar(&cfg.ShardingEnabled, "compactor.sharding-enabled", false, "Shard tenants across multiple compactor instances. Sharding is required if you run multiple compactor instances, in order to coordinate compactions and avoid race conditions leading to the same tenant blocks simultaneously compacted by different instances.") f.DurationVar(&cfg.DeletionDelay, "compactor.deletion-delay", 12*time.Hour, "Time before a block marked for deletion is deleted from bucket. "+ - "If not 0, blocks will be marked for deletion and compactor component will delete blocks marked for deletion from the bucket. "+ - "If delete-delay is 0, blocks will be deleted straight away. Note that deleting blocks immediately can cause query failures, "+ - "if store gateway still has the block loaded, or compactor is ignoring the deletion because it's compacting the block at the same time.") + "If not 0, blocks will be marked for deletion and compactor component will permanently delete blocks marked for deletion from the bucket. "+ + "If 0, blocks will be deleted straight away. Note that deleting blocks immediately can cause query failures.") + f.DurationVar(&cfg.TenantCleanupDelay, "compactor.tenant-cleanup-delay", 6*time.Hour, "For tenants marked for deletion, this is time between deleting of last block, and doing final cleanup (marker files, debug files) of the tenant.") + f.BoolVar(&cfg.BlockDeletionMarksMigrationEnabled, "compactor.block-deletion-marks-migration-enabled", true, "When enabled, at compactor startup the bucket will be scanned and all found deletion marks inside the block location will be copied to the markers global location too. This option can (and should) be safely disabled as soon as the compactor has successfully run at least once.") f.Var(&cfg.EnabledTenants, "compactor.enabled-tenants", "Comma separated list of tenants that can be compacted. If specified, only these tenants will be compacted by compactor, otherwise all tenants can be compacted. 
Subject to sharding.") f.Var(&cfg.DisabledTenants, "compactor.disabled-tenants", "Comma separated list of tenants that cannot be compacted by this compactor. If specified, and compactor would normally pick given tenant for compaction (via -compactor.enabled-tenants or sharding), it will be ignored instead.") @@ -116,9 +168,11 @@ type Compactor struct { // If empty, no users are disabled. If not empty, users in the map are disabled (not owned by this compactor). disabledUsers map[string]struct{} - // Function that creates bucket client, TSDB planner and compactor using the context. + // Functions that creates bucket client, grouper, planner and compactor using the context. // Useful for injecting mock objects from tests. - createDependencies func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error) + bucketClientFactory func(ctx context.Context) (objstore.Bucket, error) + blocksGrouperFactory BlocksGrouperFactory + blocksCompactorFactory BlocksCompactorFactory // Users scanner, used to discover users from the bucket. usersScanner *cortex_tsdb.UsersScanner @@ -127,8 +181,8 @@ type Compactor struct { blocksCleaner *BlocksCleaner // Underlying compactor and planner used to compact TSDB blocks. - tsdbCompactor tsdb.Compactor - tsdbPlanner compact.Planner + blocksCompactor compact.Compactor + blocksPlanner compact.Planner // Client used to run operations on the bucket storing blocks. bucketClient objstore.Bucket @@ -157,22 +211,21 @@ type Compactor struct { // NewCompactor makes a new Compactor. func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, registerer prometheus.Registerer) (*Compactor, error) { - createDependencies := func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error) { - bucketClient, err := bucket.NewClient(ctx, storageCfg.Bucket, "compactor", logger, registerer) - if err != nil { - return nil, nil, nil, errors.Wrap(err, "failed to create the bucket client") - } + bucketClientFactory := func(ctx context.Context) (objstore.Bucket, error) { + return bucket.NewClient(ctx, storageCfg.Bucket, "compactor", logger, registerer) + } - compactor, err := tsdb.NewLeveledCompactor(ctx, registerer, logger, compactorCfg.BlockRanges.ToMilliseconds(), downsample.NewPool()) - if err != nil { - return nil, nil, nil, err - } + blocksGrouperFactory := compactorCfg.BlocksGrouperFactory + if blocksGrouperFactory == nil { + blocksGrouperFactory = DefaultBlocksGrouperFactory + } - planner := compact.NewTSDBBasedPlanner(logger, compactorCfg.BlockRanges.ToMilliseconds()) - return bucketClient, compactor, planner, nil + blocksCompactorFactory := compactorCfg.BlocksCompactorFactory + if blocksCompactorFactory == nil { + blocksCompactorFactory = DefaultBlocksCompactorFactory } - cortexCompactor, err := newCompactor(compactorCfg, storageCfg, logger, registerer, createDependencies) + cortexCompactor, err := newCompactor(compactorCfg, storageCfg, logger, registerer, bucketClientFactory, blocksGrouperFactory, blocksCompactorFactory) if err != nil { return nil, errors.Wrap(err, "failed to create Cortex blocks compactor") } @@ -185,16 +238,20 @@ func newCompactor( storageCfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, registerer prometheus.Registerer, - createDependencies func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, compact.Planner, error), + bucketClientFactory func(ctx context.Context) (objstore.Bucket, error), + blocksGrouperFactory BlocksGrouperFactory, + blocksCompactorFactory 
BlocksCompactorFactory, ) (*Compactor, error) { c := &Compactor{ - compactorCfg: compactorCfg, - storageCfg: storageCfg, - parentLogger: logger, - logger: log.With(logger, "component", "compactor"), - registerer: registerer, - syncerMetrics: newSyncerMetrics(registerer), - createDependencies: createDependencies, + compactorCfg: compactorCfg, + storageCfg: storageCfg, + parentLogger: logger, + logger: log.With(logger, "component", "compactor"), + registerer: registerer, + syncerMetrics: newSyncerMetrics(registerer), + bucketClientFactory: bucketClientFactory, + blocksGrouperFactory: blocksGrouperFactory, + blocksCompactorFactory: blocksCompactorFactory, compactionRunsStarted: promauto.With(registerer).NewCounter(prometheus.CounterOpts{ Name: "cortex_compactor_runs_started_total", @@ -265,10 +322,16 @@ func newCompactor( func (c *Compactor) starting(ctx context.Context) error { var err error - // Create bucket client and compactor. - c.bucketClient, c.tsdbCompactor, c.tsdbPlanner, err = c.createDependencies(ctx) + // Create bucket client. + c.bucketClient, err = c.bucketClientFactory(ctx) + if err != nil { + return errors.Wrap(err, "failed to create bucket client") + } + + // Create blocks compactor dependencies. + c.blocksCompactor, c.blocksPlanner, err = c.blocksCompactorFactory(ctx, c.compactorCfg, c.logger, c.registerer) if err != nil { - return errors.Wrap(err, "failed to initialize compactor objects") + return errors.Wrap(err, "failed to initialize compactor dependencies") } // Wrap the bucket client to write block deletion marks in the global location too. @@ -322,7 +385,7 @@ func (c *Compactor) starting(ctx context.Context) error { maxWaiting := c.compactorCfg.ShardingRing.WaitStabilityMaxDuration level.Info(c.logger).Log("msg", "waiting until compactor ring topology is stable", "min_waiting", minWaiting.String(), "max_waiting", maxWaiting.String()) - if err := ring.WaitRingStability(ctx, c.ring, ring.Compactor, minWaiting, maxWaiting); err != nil { + if err := ring.WaitRingStability(ctx, c.ring, RingOp, minWaiting, maxWaiting); err != nil { level.Warn(c.logger).Log("msg", "compactor is ring topology is not stable after the max waiting time, proceeding anyway") } else { level.Info(c.logger).Log("msg", "compactor is ring topology is stable") @@ -332,11 +395,11 @@ func (c *Compactor) starting(ctx context.Context) error { // Create the blocks cleaner (service). c.blocksCleaner = NewBlocksCleaner(BlocksCleanerConfig{ - DataDir: c.compactorCfg.DataDir, - MetaSyncConcurrency: c.compactorCfg.MetaSyncConcurrency, - DeletionDelay: c.compactorCfg.DeletionDelay, - CleanupInterval: util.DurationWithJitter(c.compactorCfg.CompactionInterval, 0.1), - CleanupConcurrency: c.compactorCfg.CleanupConcurrency, + DeletionDelay: c.compactorCfg.DeletionDelay, + CleanupInterval: util.DurationWithJitter(c.compactorCfg.CleanupInterval, 0.1), + CleanupConcurrency: c.compactorCfg.CleanupConcurrency, + BlockDeletionMarksMigrationEnabled: c.compactorCfg.BlockDeletionMarksMigrationEnabled, + TenantCleanupDelay: c.compactorCfg.TenantCleanupDelay, }, c.bucketClient, c.usersScanner, c.parentLogger, c.registerer) // Ensure an initial cleanup occurred before starting the compactor. @@ -360,7 +423,7 @@ func (c *Compactor) stopping(_ error) error { func (c *Compactor) running(ctx context.Context) error { // Run an initial compaction before starting the interval. 
- c.compactUsersWithRetries(ctx) + c.compactUsers(ctx) ticker := time.NewTicker(util.DurationWithJitter(c.compactorCfg.CompactionInterval, 0.05)) defer ticker.Stop() @@ -368,7 +431,7 @@ func (c *Compactor) running(ctx context.Context) error { for { select { case <-ticker.C: - c.compactUsersWithRetries(ctx) + c.compactUsers(ctx) case <-ctx.Done(): return nil case err := <-c.ringSubservicesWatcher.Chan(): @@ -377,33 +440,20 @@ func (c *Compactor) running(ctx context.Context) error { } } -func (c *Compactor) compactUsersWithRetries(ctx context.Context) { - retries := util.NewBackoff(ctx, util.BackoffConfig{ - MinBackoff: c.compactorCfg.retryMinBackoff, - MaxBackoff: c.compactorCfg.retryMaxBackoff, - MaxRetries: c.compactorCfg.CompactionRetries, - }) +func (c *Compactor) compactUsers(ctx context.Context) { + succeeded := false c.compactionRunsStarted.Inc() - for retries.Ongoing() { - if err := c.compactUsers(ctx); err == nil { + defer func() { + if succeeded { c.compactionRunsCompleted.Inc() c.compactionRunsLastSuccess.SetToCurrentTime() - return - } else if errors.Is(err, context.Canceled) { - return + } else { + c.compactionRunsFailed.Inc() } - retries.Wait() - } - - c.compactionRunsFailed.Inc() -} - -func (c *Compactor) compactUsers(ctx context.Context) error { - // Reset progress metrics once done. - defer func() { + // Reset progress metrics once done. c.compactionRunDiscoveredTenants.Set(0) c.compactionRunSkippedTenants.Set(0) c.compactionRunSucceededTenants.Set(0) @@ -411,10 +461,10 @@ func (c *Compactor) compactUsers(ctx context.Context) error { }() level.Info(c.logger).Log("msg", "discovering users from bucket") - users, err := c.discoverUsers(ctx) + users, err := c.discoverUsersWithRetries(ctx) if err != nil { level.Error(c.logger).Log("msg", "failed to discover users from bucket", "err", err) - return errors.Wrap(err, "failed to discover users from bucket") + return } level.Info(c.logger).Log("msg", "discovered users from bucket", "users", len(users)) @@ -427,13 +477,11 @@ func (c *Compactor) compactUsers(ctx context.Context) error { users[i], users[j] = users[j], users[i] }) - errs := tsdb_errors.NewMulti() - for _, userID := range users { // Ensure the context has not been canceled (ie. compactor shutdown has been triggered). if ctx.Err() != nil { level.Info(c.logger).Log("msg", "interrupting compaction of user blocks", "err", err) - return ctx.Err() + return } // Ensure the user ID belongs to our shard. 
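Retries move from the whole compaction run (the removed compactUsersWithRetries) down to individual operations: discoverUsersWithRetries and compactUserWithRetries each wrap one unit of work in util.NewBackoff, so one failing tenant no longer restarts the entire run. A sketch of that shape, with a plain doubling delay replacing the backoff helper:

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryWithBackoff mirrors the loop in compactUserWithRetries: retry one unit
// of work up to maxRetries times, doubling the delay between attempts.
func retryWithBackoff(ctx context.Context, minDelay, maxDelay time.Duration, maxRetries int, fn func() error) error {
	var lastErr error
	delay := minDelay
	for i := 0; i < maxRetries; i++ {
		if lastErr = fn(); lastErr == nil {
			return nil
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-time.After(delay):
		}
		if delay *= 2; delay > maxDelay {
			delay = maxDelay
		}
	}
	return lastErr
}

func main() {
	attempts := 0
	err := retryWithBackoff(context.Background(), 10*time.Millisecond, 80*time.Millisecond, 3, func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	})
	fmt.Println(attempts, err) // 3 <nil>
}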
@@ -459,10 +507,9 @@ func (c *Compactor) compactUsers(ctx context.Context) error { level.Info(c.logger).Log("msg", "starting compaction of user blocks", "user", userID) - if err = c.compactUser(ctx, userID); err != nil { + if err = c.compactUserWithRetries(ctx, userID); err != nil { c.compactionRunFailedTenants.Inc() level.Error(c.logger).Log("msg", "failed to compact user blocks", "user", userID, "err", err) - errs.Add(errors.Wrapf(err, "failed to compact user blocks (user: %s)", userID)) continue } @@ -470,7 +517,28 @@ func (c *Compactor) compactUsers(ctx context.Context) error { level.Info(c.logger).Log("msg", "successfully compacted user blocks", "user", userID) } - return errs.Err() + succeeded = true +} + +func (c *Compactor) compactUserWithRetries(ctx context.Context, userID string) error { + var lastErr error + + retries := util.NewBackoff(ctx, util.BackoffConfig{ + MinBackoff: c.compactorCfg.retryMinBackoff, + MaxBackoff: c.compactorCfg.retryMaxBackoff, + MaxRetries: c.compactorCfg.CompactionRetries, + }) + + for retries.Ongoing() { + lastErr = c.compactUser(ctx, userID) + if lastErr == nil { + return nil + } + + retries.Wait() + } + + return lastErr } func (c *Compactor) compactUser(ctx context.Context, userID string) error { @@ -479,7 +547,7 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error { reg := prometheus.NewRegistry() defer c.syncerMetrics.gatherThanosSyncerMetrics(reg) - ulogger := util.WithUserID(userID, c.logger) + ulogger := util_log.WithUserID(userID, c.logger) // Filters out duplicate blocks that can be formed from two or more overlapping // blocks that fully submatches the source blocks of the older blocks. @@ -532,22 +600,12 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error { return errors.Wrap(err, "failed to create syncer") } - grouper := compact.NewDefaultGrouper( - ulogger, - bucket, - false, // Do not accept malformed indexes - true, // Enable vertical compaction - reg, - c.blocksMarkedForDeletion, - c.garbageCollectedBlocks, - ) - compactor, err := compact.NewBucketCompactor( ulogger, syncer, - grouper, - c.tsdbPlanner, - c.tsdbCompactor, + c.blocksGrouperFactory(ctx, c.compactorCfg, bucket, ulogger, reg, c.blocksMarkedForDeletion, c.garbageCollectedBlocks), + c.blocksPlanner, + c.blocksCompactor, path.Join(c.compactorCfg.DataDir, "compact"), bucket, c.compactorCfg.CompactionConcurrency, @@ -563,6 +621,29 @@ func (c *Compactor) compactUser(ctx context.Context, userID string) error { return nil } +func (c *Compactor) discoverUsersWithRetries(ctx context.Context) ([]string, error) { + var lastErr error + + retries := util.NewBackoff(ctx, util.BackoffConfig{ + MinBackoff: c.compactorCfg.retryMinBackoff, + MaxBackoff: c.compactorCfg.retryMaxBackoff, + MaxRetries: c.compactorCfg.CompactionRetries, + }) + + for retries.Ongoing() { + var users []string + + users, lastErr = c.discoverUsers(ctx) + if lastErr == nil { + return users, nil + } + + retries.Wait() + } + + return nil, lastErr +} + func (c *Compactor) discoverUsers(ctx context.Context) ([]string, error) { var users []string @@ -590,7 +671,7 @@ func (c *Compactor) ownUser(userID string) (bool, error) { userHash := hasher.Sum32() // Check whether this compactor instance owns the user. 
- rs, err := c.ring.Get(userHash, ring.Compactor, []ring.IngesterDesc{}) + rs, err := c.ring.Get(userHash, RingOp, nil, nil, nil) if err != nil { return false, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go index 2c0608ea056..d73c7a68906 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_http.go @@ -6,7 +6,7 @@ import ( "github.com/go-kit/kit/log/level" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -32,7 +32,7 @@ func writeMessage(w http.ResponseWriter, message string) { }{Message: message}) if err != nil { - level.Error(util.Logger).Log("msg", "unable to serve compactor ring page", "err", err) + level.Error(util_log.Logger).Log("msg", "unable to serve compactor ring page", "err", err) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go index 7bfc930da21..d10f675f5e0 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/compactor_ring.go @@ -9,8 +9,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // RingConfig masks the ring lifecycler config which contains @@ -40,7 +40,7 @@ type RingConfig struct { func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { hostname, err := os.Hostname() if err != nil { - level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err) os.Exit(1) } @@ -79,6 +79,7 @@ func (cfg *RingConfig) ToLifecyclerConfig() ring.LifecyclerConfig { // Configure lifecycler lc.RingConfig = rc + lc.RingConfig.SubringCacheDisabled = true lc.ListenPort = cfg.ListenPort lc.Addr = cfg.InstanceAddr lc.Port = cfg.InstancePort diff --git a/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go index f3ba2f491a7..43873c2f0df 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/compactor/syncer_metrics.go @@ -6,6 +6,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // Copied from Thanos, pkg/compact/compact.go. 
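ownUser still shards tenants by hashing the user ID with FNV-1a and asking the ring which instance owns the resulting token; only the operation passed to ring.Get changes, from the removed ring.Compactor constant to the new RingOp. A toy sketch of the ownership check, with a static two-entry table standing in for the real ring lookup:

package main

import (
	"fmt"
	"hash/fnv"
)

// ownUser hashes the tenant ID the same way the compactor does; the map
// lookup below is a toy stand-in for c.ring.Get(userHash, RingOp, ...).
func ownUser(userID, instanceAddr string, ringTokens map[string]string) (bool, error) {
	hasher := fnv.New32a()
	if _, err := hasher.Write([]byte(userID)); err != nil {
		return false, err
	}
	userHash := hasher.Sum32()

	// Pretend a two-instance ring resolved the token to an owning instance.
	owner := ringTokens[fmt.Sprintf("%d", userHash%2)]
	return owner == instanceAddr, nil
}

func main() {
	ring := map[string]string{"0": "compactor-0", "1": "compactor-1"}
	ok, _ := ownUser("tenant-a", "compactor-0", ring)
	fmt.Println("owned:", ok)
}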
@@ -95,13 +96,13 @@ func (m *syncerMetrics) gatherThanosSyncerMetrics(reg *prometheus.Registry) { mf, err := reg.Gather() if err != nil { - level.Warn(util.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err) + level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err) return } mfm, err := util.NewMetricFamilyMap(mf) if err != nil { - level.Warn(util.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err) + level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from syncer registry after compaction", "err", err) return } diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go b/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go index 7ee4a75360e..127292bc69d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/api/api.go @@ -24,6 +24,7 @@ import ( "github.com/cortexproject/cortex/pkg/configs/userconfig" "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) var ( @@ -113,7 +114,7 @@ func (a *API) getConfig(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusUnauthorized) return } - logger := util.WithContext(r.Context(), util.Logger) + logger := util_log.WithContext(r.Context(), util_log.Logger) cfg, err := a.db.GetConfig(r.Context(), userID) if err == sql.ErrNoRows { @@ -151,7 +152,7 @@ func (a *API) setConfig(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusUnauthorized) return } - logger := util.WithContext(r.Context(), util.Logger) + logger := util_log.WithContext(r.Context(), util_log.Logger) var cfg userconfig.Config switch parseConfigFormat(r.Header.Get("Content-Type"), FormatJSON) { @@ -201,7 +202,7 @@ func (a *API) setConfig(w http.ResponseWriter, r *http.Request) { } func (a *API) validateAlertmanagerConfig(w http.ResponseWriter, r *http.Request) { - logger := util.WithContext(r.Context(), util.Logger) + logger := util_log.WithContext(r.Context(), util_log.Logger) cfg, err := ioutil.ReadAll(r.Body) if err != nil { level.Error(logger).Log("msg", "error reading request body", "err", err) @@ -265,7 +266,7 @@ type ConfigsView struct { func (a *API) getConfigs(w http.ResponseWriter, r *http.Request) { var cfgs map[string]userconfig.View var cfgErr error - logger := util.WithContext(r.Context(), util.Logger) + logger := util_log.WithContext(r.Context(), util_log.Logger) rawSince := r.FormValue("since") if rawSince == "" { cfgs, cfgErr = a.db.GetAllConfigs(r.Context()) @@ -301,7 +302,7 @@ func (a *API) deactivateConfig(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusUnauthorized) return } - logger := util.WithContext(r.Context(), util.Logger) + logger := util_log.WithContext(r.Context(), util_log.Logger) if err := a.db.DeactivateConfig(r.Context(), userID); err != nil { if err == sql.ErrNoRows { @@ -323,7 +324,7 @@ func (a *API) restoreConfig(w http.ResponseWriter, r *http.Request) { http.Error(w, err.Error(), http.StatusUnauthorized) return } - logger := util.WithContext(r.Context(), util.Logger) + logger := util_log.WithContext(r.Context(), util_log.Logger) if err := a.db.RestoreConfig(r.Context(), userID); err != nil { if err == sql.ErrNoRows { diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go 
b/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go index cc57ad82f6d..5517d1cb5b5 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/client/client.go @@ -18,8 +18,8 @@ import ( "github.com/weaveworks/common/instrument" "github.com/cortexproject/cortex/pkg/configs/userconfig" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" tls_cfg "github.com/cortexproject/cortex/pkg/util/tls" ) @@ -155,7 +155,7 @@ func doRequest(endpoint string, timeout time.Duration, tlsConfig *tls.Config, si var config ConfigsResponse if err := json.NewDecoder(resp.Body).Decode(&config); err != nil { - level.Error(util.Logger).Log("msg", "configs: couldn't decode JSON body", "err", err) + level.Error(util_log.Logger).Log("msg", "configs: couldn't decode JSON body", "err", err) return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go index 2bd3bbd3e7c..c66ff90fd61 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/db/postgres/postgres.go @@ -9,6 +9,7 @@ import ( "time" "github.com/cortexproject/cortex/pkg/configs/userconfig" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/Masterminds/squirrel" "github.com/go-kit/kit/log/level" @@ -18,8 +19,6 @@ import ( "github.com/lib/pq" _ "github.com/lib/pq" // Import the postgres sql driver "github.com/pkg/errors" - - "github.com/cortexproject/cortex/pkg/util" ) const ( @@ -60,7 +59,7 @@ func dbWait(db *sql.DB) error { if err == nil { return nil } - level.Warn(util.Logger).Log("msg", "db connection not established, retrying...", "err", err) + level.Warn(util_log.Logger).Log("msg", "db connection not established, retrying...", "err", err) time.Sleep(time.Second << uint(tries)) } return errors.Wrapf(err, "db connection not established after %s", dbTimeout) @@ -88,13 +87,13 @@ func New(uri, migrationsDir string) (DB, error) { return DB{}, errors.Wrap(err, "database migrations initialization failed") } - level.Info(util.Logger).Log("msg", "running database migrations...") + level.Info(util_log.Logger).Log("msg", "running database migrations...") if err := m.Up(); err != nil { if err != migrate.ErrNoChange { return DB{}, errors.Wrap(err, "database migrations failed") } - level.Debug(util.Logger).Log("msg", "no change in schema, error (ignored)", "err", err) + level.Debug(util_log.Logger).Log("msg", "no change in schema, error (ignored)", "err", err) } } @@ -354,7 +353,7 @@ func (d DB) Transaction(f func(DB) error) error { if err != nil { // Rollback error is ignored as we already have one in progress if err2 := tx.Rollback(); err2 != nil { - level.Warn(util.Logger).Log("msg", "transaction rollback error (ignored)", "err", err2) + level.Warn(util_log.Logger).Log("msg", "transaction rollback error (ignored)", "err", err2) } return err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go b/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go index 7a2cc3aac61..5ae94c01d30 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/db/traced.go @@ -5,10 +5,9 @@ import ( "fmt" "github.com/cortexproject/cortex/pkg/configs/userconfig" + 
util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" - - "github.com/cortexproject/cortex/pkg/util" ) // traced adds log trace lines on each db call @@ -17,7 +16,7 @@ type traced struct { } func (t traced) trace(name string, args ...interface{}) { - level.Debug(util.Logger).Log("msg", fmt.Sprintf("%s: %#v", name, args)) + level.Debug(util_log.Logger).Log("msg", fmt.Sprintf("%s: %#v", name, args)) } func (t traced) GetConfig(ctx context.Context, userID string) (cfg userconfig.View, err error) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go b/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go index 0b36801012c..e55542fd443 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/configs/userconfig/config.go @@ -17,7 +17,7 @@ import ( "github.com/prometheus/prometheus/rules" legacy_promql "github.com/cortexproject/cortex/pkg/configs/legacy_promql" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // An ID is the ID of a single users's Cortex configuration. When a @@ -370,7 +370,7 @@ func (c RulesConfig) parseV2() (map[string][]rules.Rule, error) { labels.FromMap(rl.Annotations), nil, true, - log.With(util.Logger, "alert", rl.Alert.Value), + log.With(util_log.Logger, "alert", rl.Alert.Value), )) continue } @@ -418,7 +418,7 @@ func (c RulesConfig) parseV1() (map[string][]rules.Rule, error) { rule = rules.NewAlertingRule( r.Name, expr, r.Duration, r.Labels, r.Annotations, nil, true, - log.With(util.Logger, "alert", r.Name), + log.With(util_log.Logger, "alert", r.Name), ) case *legacy_promql.RecordStmt: diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go index d66b581e197..6573f3c6a68 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/cortex.go @@ -39,6 +39,7 @@ import ( "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/querier" "github.com/cortexproject/cortex/pkg/querier/queryrange" + "github.com/cortexproject/cortex/pkg/querier/tenantfederation" querier_worker "github.com/cortexproject/cortex/pkg/querier/worker" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" @@ -47,10 +48,12 @@ import ( "github.com/cortexproject/cortex/pkg/scheduler" "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/storegateway" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/fakeauth" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/grpc/healthcheck" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/modules" "github.com/cortexproject/cortex/pkg/util/process" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" @@ -82,27 +85,28 @@ type Config struct { PrintConfig bool `yaml:"-"` HTTPPrefix string `yaml:"http_prefix"` - API api.Config `yaml:"api"` - Server server.Config `yaml:"server"` - Distributor distributor.Config `yaml:"distributor"` - Querier querier.Config `yaml:"querier"` - IngesterClient client.Config `yaml:"ingester_client"` - Ingester ingester.Config `yaml:"ingester"` - Flusher flusher.Config `yaml:"flusher"` - Storage 
storage.Config `yaml:"storage"` - ChunkStore chunk.StoreConfig `yaml:"chunk_store"` - Schema chunk.SchemaConfig `yaml:"schema" doc:"hidden"` // Doc generation tool doesn't support it because part of the SchemaConfig doesn't support CLI flags (needs manual documentation) - LimitsConfig validation.Limits `yaml:"limits"` - Prealloc client.PreallocConfig `yaml:"prealloc" doc:"hidden"` - Worker querier_worker.Config `yaml:"frontend_worker"` - Frontend frontend.CombinedFrontendConfig `yaml:"frontend"` - QueryRange queryrange.Config `yaml:"query_range"` - TableManager chunk.TableManagerConfig `yaml:"table_manager"` - Encoding encoding.Config `yaml:"-"` // No yaml for this, it only works with flags. - BlocksStorage tsdb.BlocksStorageConfig `yaml:"blocks_storage"` - Compactor compactor.Config `yaml:"compactor"` - StoreGateway storegateway.Config `yaml:"store_gateway"` - PurgerConfig purger.Config `yaml:"purger"` + API api.Config `yaml:"api"` + Server server.Config `yaml:"server"` + Distributor distributor.Config `yaml:"distributor"` + Querier querier.Config `yaml:"querier"` + IngesterClient client.Config `yaml:"ingester_client"` + Ingester ingester.Config `yaml:"ingester"` + Flusher flusher.Config `yaml:"flusher"` + Storage storage.Config `yaml:"storage"` + ChunkStore chunk.StoreConfig `yaml:"chunk_store"` + Schema chunk.SchemaConfig `yaml:"schema" doc:"hidden"` // Doc generation tool doesn't support it because part of the SchemaConfig doesn't support CLI flags (needs manual documentation) + LimitsConfig validation.Limits `yaml:"limits"` + Prealloc client.PreallocConfig `yaml:"prealloc" doc:"hidden"` + Worker querier_worker.Config `yaml:"frontend_worker"` + Frontend frontend.CombinedFrontendConfig `yaml:"frontend"` + QueryRange queryrange.Config `yaml:"query_range"` + TableManager chunk.TableManagerConfig `yaml:"table_manager"` + Encoding encoding.Config `yaml:"-"` // No yaml for this, it only works with flags. + BlocksStorage tsdb.BlocksStorageConfig `yaml:"blocks_storage"` + Compactor compactor.Config `yaml:"compactor"` + StoreGateway storegateway.Config `yaml:"store_gateway"` + PurgerConfig purger.Config `yaml:"purger"` + TenantFederation tenantfederation.Config `yaml:"tenant_federation"` Ruler ruler.Config `yaml:"ruler"` Configs configs.Config `yaml:"configs"` @@ -149,6 +153,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet) { c.Compactor.RegisterFlags(f) c.StoreGateway.RegisterFlags(f) c.PurgerConfig.RegisterFlags(f) + c.TenantFederation.RegisterFlags(f) c.Ruler.RegisterFlags(f) c.Configs.RegisterFlags(f) @@ -304,6 +309,12 @@ func New(cfg Config) (*Cortex, error) { os.Exit(0) } + // Swap out the default resolver to support multiple tenant IDs separated by a '|' + if cfg.TenantFederation.Enabled { + util_log.WarnExperimentalUse("tenant-federation") + tenant.WithDefaultResolver(tenant.NewMultiResolver()) + } + // Don't check auth header on TransferChunks, as we weren't originally // sending it and this could cause transfers to fail on update. 
cfg.API.HTTPAuthMiddleware = fakeauth.SetupAuthMiddleware(&cfg.Server, cfg.AuthEnabled, @@ -342,12 +353,12 @@ func (t *Cortex) Run() error { if c, err := process.NewProcessCollector(); err == nil { prometheus.MustRegister(c) } else { - level.Warn(util.Logger).Log("msg", "skipped registration of custom process metrics collector", "err", err) + level.Warn(util_log.Logger).Log("msg", "skipped registration of custom process metrics collector", "err", err) } for _, module := range t.Cfg.Target { if !t.ModuleManager.IsUserVisibleModule(module) { - level.Warn(util.Logger).Log("msg", "selected target is an internal module, is this intended?", "target", module) + level.Warn(util_log.Logger).Log("msg", "selected target is an internal module, is this intended?", "target", module) } } @@ -376,8 +387,8 @@ func (t *Cortex) Run() error { grpc_health_v1.RegisterHealthServer(t.Server.GRPC, healthcheck.New(sm)) // Let's listen for events from this manager, and log them. - healthy := func() { level.Info(util.Logger).Log("msg", "Cortex started") } - stopped := func() { level.Info(util.Logger).Log("msg", "Cortex stopped") } + healthy := func() { level.Info(util_log.Logger).Log("msg", "Cortex started") } + stopped := func() { level.Info(util_log.Logger).Log("msg", "Cortex stopped") } serviceFailed := func(service services.Service) { // if any service fails, stop entire Cortex sm.StopAsync() @@ -386,15 +397,15 @@ func (t *Cortex) Run() error { for m, s := range t.ServiceMap { if s == service { if service.FailureCase() == util.ErrStopProcess { - level.Info(util.Logger).Log("msg", "received stop signal via return error", "module", m, "err", service.FailureCase()) + level.Info(util_log.Logger).Log("msg", "received stop signal via return error", "module", m, "err", service.FailureCase()) } else { - level.Error(util.Logger).Log("msg", "module failed", "module", m, "err", service.FailureCase()) + level.Error(util_log.Logger).Log("msg", "module failed", "module", m, "err", service.FailureCase()) } return } } - level.Error(util.Logger).Log("msg", "module failed", "module", "unknown", "err", service.FailureCase()) + level.Error(util_log.Logger).Log("msg", "module failed", "module", "unknown", "err", service.FailureCase()) } sm.AddListener(services.NewManagerListener(healthy, stopped, serviceFailed)) @@ -465,6 +476,6 @@ func (t *Cortex) readyHandler(sm *services.Manager) http.HandlerFunc { } } - http.Error(w, "ready", http.StatusOK) + util.WriteTextResponse(w, "ready") } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go index 5ec08f5c72e..033f3f8bafc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/modules.go @@ -1,6 +1,7 @@ package cortex import ( + "flag" "fmt" "os" "time" @@ -30,6 +31,7 @@ import ( "github.com/cortexproject/cortex/pkg/ingester" "github.com/cortexproject/cortex/pkg/querier" "github.com/cortexproject/cortex/pkg/querier/queryrange" + "github.com/cortexproject/cortex/pkg/querier/tenantfederation" querier_worker "github.com/cortexproject/cortex/pkg/querier/worker" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv/codec" @@ -37,8 +39,7 @@ import ( "github.com/cortexproject/cortex/pkg/ruler" "github.com/cortexproject/cortex/pkg/scheduler" "github.com/cortexproject/cortex/pkg/storegateway" - "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/flagext" + util_log 
"github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/modules" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" "github.com/cortexproject/cortex/pkg/util/services" @@ -76,21 +77,28 @@ const ( BlocksPurger string = "blocks-purger" Purger string = "purger" QueryScheduler string = "query-scheduler" + TenantFederation string = "tenant-federation" All string = "all" ) +func newDefaultConfig() *Config { + defaultConfig := &Config{} + defaultFS := flag.NewFlagSet("", flag.PanicOnError) + defaultConfig.RegisterFlags(defaultFS) + return defaultConfig +} + func (t *Cortex) initAPI() (services.Service, error) { t.Cfg.API.ServerPrefix = t.Cfg.Server.PathPrefix t.Cfg.API.LegacyHTTPPrefix = t.Cfg.HTTPPrefix - a, err := api.New(t.Cfg.API, t.Cfg.Server, t.Server, util.Logger) + a, err := api.New(t.Cfg.API, t.Cfg.Server, t.Server, util_log.Logger) if err != nil { return nil, err } t.API = a - - t.API.RegisterAPI(t.Cfg.Server.PathPrefix, t.Cfg) + t.API.RegisterAPI(t.Cfg.Server.PathPrefix, t.Cfg, newDefaultConfig()) return nil, nil } @@ -135,19 +143,6 @@ func (t *Cortex) initRing() (serv services.Service, err error) { } func (t *Cortex) initRuntimeConfig() (services.Service, error) { - // We need to modify LimitsConfig before calling SetDefaultLimitsForYAMLUnmarshalling later in this method - // but also if runtime-config is not used, for setting limits used by initOverrides. - // TODO: Remove this in Cortex 1.6. - if t.Cfg.Ruler.EvaluationDelay != 0 && t.Cfg.LimitsConfig.RulerEvaluationDelay == 0 { - t.Cfg.LimitsConfig.RulerEvaluationDelay = t.Cfg.Ruler.EvaluationDelay - - // No need to report if this field isn't going to be used. - if t.Cfg.isModuleEnabled(Ruler) || t.Cfg.isModuleEnabled(All) { - flagext.DeprecatedFlagsUsed.Inc() - level.Warn(util.Logger).Log("msg", "Using DEPRECATED YAML config field ruler.evaluation_delay_duration, please use limits.ruler_evaluation_delay_duration instead.") - } - } - if t.Cfg.RuntimeConfig.LoadPath == "" { t.Cfg.RuntimeConfig.LoadPath = t.Cfg.LimitsConfig.PerTenantOverrideConfig t.Cfg.RuntimeConfig.ReloadPeriod = t.Cfg.LimitsConfig.PerTenantOverridePeriod @@ -164,6 +159,7 @@ func (t *Cortex) initRuntimeConfig() (services.Service, error) { serv, err := runtimeconfig.NewRuntimeConfigManager(t.Cfg.RuntimeConfig, prometheus.DefaultRegisterer) t.RuntimeConfig = serv + t.API.RegisterRuntimeConfig(runtimeConfigHandler(t.RuntimeConfig, t.Cfg.LimitsConfig)) return serv, err } @@ -211,6 +207,14 @@ func (t *Cortex) initQueryable() (serv services.Service, err error) { return nil, nil } +// Enable merge querier if multi tenant query federation is enabled +func (t *Cortex) initTenantFederation() (serv services.Service, err error) { + if t.Cfg.TenantFederation.Enabled { + t.QuerierQueryable = querier.NewSampleAndChunkQueryable(tenantfederation.NewQueryable(t.QuerierQueryable)) + } + return nil, nil +} + // initQuerier registers an internal HTTP router with a Prometheus API backed by the // Cortex Queryable. Then it does one of the following: // @@ -271,7 +275,7 @@ func (t *Cortex) initQuerier() (serv services.Service, err error) { t.Distributor, t.TombstonesLoader, prometheus.DefaultRegisterer, - util.Logger, + util_log.Logger, ) // If the querier is running standalone without the query-frontend or query-scheduler, we must register it's internal @@ -286,11 +290,11 @@ func (t *Cortex) initQuerier() (serv services.Service, err error) { // and internal using the default instrumentation when running as a standalone service. 
internalQuerierRouter = t.Server.HTTPServer.Handler } else { - // Single binary mode requires a query frontend endpoint for the worker. If no frontend or scheduler endpoint + // Single binary mode requires a query frontend endpoint for the worker. If no frontend and scheduler endpoint // is configured, Cortex will default to using frontend on localhost on it's own GRPC listening port. - if t.Cfg.Worker.FrontendAddress == "" || t.Cfg.Worker.SchedulerAddress == "" { + if t.Cfg.Worker.FrontendAddress == "" && t.Cfg.Worker.SchedulerAddress == "" { address := fmt.Sprintf("127.0.0.1:%d", t.Cfg.Server.GRPCListenPort) - level.Warn(util.Logger).Log("msg", "Worker address is empty in single binary mode. Attempting automatic worker configuration. If queries are unresponsive consider configuring the worker explicitly.", "address", address) + level.Warn(util_log.Logger).Log("msg", "Worker address is empty in single binary mode. Attempting automatic worker configuration. If queries are unresponsive consider configuring the worker explicitly.", "address", address) t.Cfg.Worker.FrontendAddress = address } @@ -306,8 +310,7 @@ func (t *Cortex) initQuerier() (serv services.Service, err error) { } t.Cfg.Worker.MaxConcurrentRequests = t.Cfg.Querier.MaxConcurrent - t.Cfg.Worker.QueryStatsEnabled = t.Cfg.Frontend.Handler.QueryStatsEnabled - return querier_worker.NewQuerierWorker(t.Cfg.Worker, httpgrpc_server.NewServer(internalQuerierRouter), util.Logger, prometheus.DefaultRegisterer) + return querier_worker.NewQuerierWorker(t.Cfg.Worker, httpgrpc_server.NewServer(internalQuerierRouter), util_log.Logger, prometheus.DefaultRegisterer) } func (t *Cortex) initStoreQueryables() (services.Service, error) { @@ -369,7 +372,7 @@ func initQueryableForEngine(engine string, cfg Config, chunkStore chunk.Store, l cfg.Querier.StoreGatewayAddresses = fmt.Sprintf("127.0.0.1:%d", cfg.Server.GRPCListenPort) } - return querier.NewBlocksStoreQueryableFromConfig(cfg.Querier, cfg.StoreGateway, cfg.BlocksStorage, limits, util.Logger, reg) + return querier.NewBlocksStoreQueryableFromConfig(cfg.Querier, cfg.StoreGateway, cfg.BlocksStorage, limits, util_log.Logger, reg) default: return nil, fmt.Errorf("unknown storage engine '%s'", engine) @@ -388,7 +391,7 @@ func (t *Cortex) initIngesterService() (serv services.Service, err error) { t.Cfg.Ingester.DistributorShardByAllLabels = t.Cfg.Distributor.ShardByAllLabels t.tsdbIngesterConfig() - t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Overrides, t.Store, prometheus.DefaultRegisterer) + t.Ingester, err = ingester.New(t.Cfg.Ingester, t.Cfg.IngesterClient, t.Overrides, t.Store, prometheus.DefaultRegisterer, util_log.Logger) if err != nil { return } @@ -411,6 +414,7 @@ func (t *Cortex) initFlusher() (serv services.Service, err error) { t.Store, t.Overrides, prometheus.DefaultRegisterer, + util_log.Logger, ) if err != nil { return @@ -428,7 +432,7 @@ func (t *Cortex) initChunkStore() (serv services.Service, err error) { return } - t.Store, err = storage.NewStore(t.Cfg.Storage, t.Cfg.ChunkStore, t.Cfg.Schema, t.Overrides, prometheus.DefaultRegisterer, t.TombstonesLoader, util.Logger) + t.Store, err = storage.NewStore(t.Cfg.Storage, t.Cfg.ChunkStore, t.Cfg.Schema, t.Overrides, prometheus.DefaultRegisterer, t.TombstonesLoader, util_log.Logger) if err != nil { return } @@ -478,16 +482,17 @@ func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err erro tripperware, cache, err := queryrange.NewTripperware( t.Cfg.QueryRange, - util.Logger, + util_log.Logger, 
t.Overrides, queryrange.PrometheusCodec, queryrange.PrometheusResponseExtractor{}, t.Cfg.Schema, promql.EngineOpts{ - Logger: util.Logger, - Reg: prometheus.DefaultRegisterer, - MaxSamples: t.Cfg.Querier.MaxSamples, - Timeout: t.Cfg.Querier.Timeout, + Logger: util_log.Logger, + Reg: prometheus.DefaultRegisterer, + MaxSamples: t.Cfg.Querier.MaxSamples, + Timeout: t.Cfg.Querier.Timeout, + EnableAtModifier: t.Cfg.Querier.AtModifierEnabled, NoStepSubqueryIntervalFn: func(int64) int64 { return t.Cfg.Querier.DefaultEvaluationInterval.Milliseconds() }, @@ -513,7 +518,7 @@ func (t *Cortex) initQueryFrontendTripperware() (serv services.Service, err erro } func (t *Cortex) initQueryFrontend() (serv services.Service, err error) { - roundTripper, frontendV1, frontendV2, err := frontend.InitFrontend(t.Cfg.Frontend, t.Overrides, t.Cfg.Server.GRPCListenPort, util.Logger, prometheus.DefaultRegisterer) + roundTripper, frontendV1, frontendV2, err := frontend.InitFrontend(t.Cfg.Frontend, t.Overrides, t.Cfg.Server.GRPCListenPort, util_log.Logger, prometheus.DefaultRegisterer) if err != nil { return nil, err } @@ -521,7 +526,7 @@ func (t *Cortex) initQueryFrontend() (serv services.Service, err error) { // Wrap roundtripper into Tripperware. roundTripper = t.QueryFrontendTripperware(roundTripper) - handler := transport.NewHandler(t.Cfg.Frontend.Handler, roundTripper, util.Logger, prometheus.DefaultRegisterer) + handler := transport.NewHandler(t.Cfg.Frontend.Handler, roundTripper, util_log.Logger, prometheus.DefaultRegisterer) if t.Cfg.Frontend.CompressResponses { handler = gziphandler.GzipHandler(handler) } @@ -567,7 +572,7 @@ func (t *Cortex) initTableManager() (services.Service, error) { t.Cfg.TableManager.ChunkTables.InactiveReadScale.Enabled || t.Cfg.TableManager.IndexTables.InactiveReadScale.Enabled) && t.Cfg.Storage.AWSStorageConfig.Metrics.URL == "" { - level.Error(util.Logger).Log("msg", "WriteScale is enabled but no Metrics URL has been provided") + level.Error(util_log.Logger).Log("msg", "WriteScale is enabled but no Metrics URL has been provided") os.Exit(1) } @@ -580,7 +585,7 @@ func (t *Cortex) initTableManager() (services.Service, error) { } bucketClient, err := storage.NewBucketClient(t.Cfg.Storage) - util.CheckFatal("initializing bucket client", err) + util_log.CheckFatal("initializing bucket client", err) var extraTables []chunk.ExtraTables if t.Cfg.PurgerConfig.Enable { @@ -606,7 +611,7 @@ func (t *Cortex) initRulerStorage() (serv services.Service, err error) { // to determine if it's unconfigured. the following check, however, correctly tests this. // Single binary integration tests will break if this ever drifts if t.Cfg.isModuleEnabled(All) && t.Cfg.Ruler.StoreConfig.IsDefaults() { - level.Info(util.Logger).Log("msg", "RulerStorage is not configured in single binary mode and will not be started.") + level.Info(util_log.Logger).Log("msg", "RulerStorage is not configured in single binary mode and will not be started.") return } @@ -616,7 +621,7 @@ func (t *Cortex) initRulerStorage() (serv services.Service, err error) { func (t *Cortex) initRuler() (serv services.Service, err error) { if t.RulerStorage == nil { - level.Info(util.Logger).Log("msg", "RulerStorage is nil. Not starting the ruler.") + level.Info(util_log.Logger).Log("msg", "RulerStorage is nil. 
Not starting the ruler.") return nil, nil } @@ -625,7 +630,7 @@ func (t *Cortex) initRuler() (serv services.Service, err error) { queryable, engine := querier.New(t.Cfg.Querier, t.Overrides, t.Distributor, t.StoreQueryables, t.TombstonesLoader, rulerRegisterer) managerFactory := ruler.DefaultTenantManagerFactory(t.Cfg.Ruler, t.Distributor, queryable, engine, t.Overrides) - manager, err := ruler.NewDefaultMultiTenantManager(t.Cfg.Ruler, managerFactory, prometheus.DefaultRegisterer, util.Logger) + manager, err := ruler.NewDefaultMultiTenantManager(t.Cfg.Ruler, managerFactory, prometheus.DefaultRegisterer, util_log.Logger) if err != nil { return nil, err } @@ -634,7 +639,7 @@ func (t *Cortex) initRuler() (serv services.Service, err error) { t.Cfg.Ruler, manager, prometheus.DefaultRegisterer, - util.Logger, + util_log.Logger, t.RulerStorage, t.Overrides, ) @@ -668,7 +673,9 @@ func (t *Cortex) initConfig() (serv services.Service, err error) { } func (t *Cortex) initAlertManager() (serv services.Service, err error) { - t.Alertmanager, err = alertmanager.NewMultitenantAlertmanager(&t.Cfg.Alertmanager, util.Logger, prometheus.DefaultRegisterer) + t.Cfg.Alertmanager.ShardingRing.ListenPort = t.Cfg.Server.HTTPListenPort + + t.Alertmanager, err = alertmanager.NewMultitenantAlertmanager(&t.Cfg.Alertmanager, util_log.Logger, prometheus.DefaultRegisterer) if err != nil { return } @@ -680,7 +687,7 @@ func (t *Cortex) initAlertManager() (serv services.Service, err error) { func (t *Cortex) initCompactor() (serv services.Service, err error) { t.Cfg.Compactor.ShardingRing.ListenPort = t.Cfg.Server.GRPCListenPort - t.Compactor, err = compactor.NewCompactor(t.Cfg.Compactor, t.Cfg.BlocksStorage, util.Logger, prometheus.DefaultRegisterer) + t.Compactor, err = compactor.NewCompactor(t.Cfg.Compactor, t.Cfg.BlocksStorage, util_log.Logger, prometheus.DefaultRegisterer) if err != nil { return } @@ -700,7 +707,7 @@ func (t *Cortex) initStoreGateway() (serv services.Service, err error) { t.Cfg.StoreGateway.ShardingRing.ListenPort = t.Cfg.Server.GRPCListenPort - t.StoreGateway, err = storegateway.NewStoreGateway(t.Cfg.StoreGateway, t.Cfg.BlocksStorage, t.Overrides, t.Cfg.Server.LogLevel, util.Logger, prometheus.DefaultRegisterer) + t.StoreGateway, err = storegateway.NewStoreGateway(t.Cfg.StoreGateway, t.Cfg.BlocksStorage, t.Overrides, t.Cfg.Server.LogLevel, util_log.Logger, prometheus.DefaultRegisterer) if err != nil { return nil, err } @@ -716,7 +723,8 @@ func (t *Cortex) initMemberlistKV() (services.Service, error) { t.Cfg.MemberlistKV.Codecs = []codec.Codec{ ring.GetCodec(), } - t.MemberlistKV = memberlist.NewKVInitService(&t.Cfg.MemberlistKV, util.Logger) + t.MemberlistKV = memberlist.NewKVInitService(&t.Cfg.MemberlistKV, util_log.Logger) + t.API.RegisterMemberlistKV(t.MemberlistKV) // Update the config. 
t.Cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV @@ -724,6 +732,7 @@ func (t *Cortex) initMemberlistKV() (services.Service, error) { t.Cfg.StoreGateway.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.Cfg.Compactor.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV t.Cfg.Ruler.Ring.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV + t.Cfg.Alertmanager.ShardingRing.KVStore.MemberlistKV = t.MemberlistKV.GetMemberlistKV return t.MemberlistKV, nil } @@ -753,7 +762,7 @@ func (t *Cortex) initBlocksPurger() (services.Service, error) { return nil, nil } - purgerAPI, err := purger.NewBlocksPurgerAPI(t.Cfg.BlocksStorage, util.Logger, prometheus.DefaultRegisterer) + purgerAPI, err := purger.NewBlocksPurgerAPI(t.Cfg.BlocksStorage, util_log.Logger, prometheus.DefaultRegisterer) if err != nil { return nil, err } @@ -763,7 +772,7 @@ func (t *Cortex) initBlocksPurger() (services.Service, error) { } func (t *Cortex) initQueryScheduler() (services.Service, error) { - s, err := scheduler.NewScheduler(t.Cfg.QueryScheduler, t.Overrides, util.Logger, prometheus.DefaultRegisterer) + s, err := scheduler.NewScheduler(t.Cfg.QueryScheduler, t.Overrides, util_log.Logger, prometheus.DefaultRegisterer) if err != nil { return nil, errors.Wrap(err, "query-scheduler init") } @@ -806,11 +815,14 @@ func (t *Cortex) setupModuleManager() error { mm.RegisterModule(BlocksPurger, t.initBlocksPurger, modules.UserInvisibleModule) mm.RegisterModule(Purger, nil) mm.RegisterModule(QueryScheduler, t.initQueryScheduler) + mm.RegisterModule(TenantFederation, t.initTenantFederation, modules.UserInvisibleModule) mm.RegisterModule(All, nil) // Add dependencies deps := map[string][]string{ API: {Server}, + MemberlistKV: {API}, + RuntimeConfig: {API}, Ring: {API, RuntimeConfig, MemberlistKV}, Overrides: {RuntimeConfig}, Distributor: {DistributorService, API}, @@ -820,7 +832,7 @@ func (t *Cortex) setupModuleManager() error { IngesterService: {Overrides, Store, RuntimeConfig, MemberlistKV}, Flusher: {Store, API}, Queryable: {Overrides, DistributorService, Store, Ring, API, StoreQueryable, MemberlistKV}, - Querier: {Queryable}, + Querier: {TenantFederation}, StoreQueryable: {Overrides, Store, MemberlistKV}, QueryFrontendTripperware: {API, Overrides, DeleteRequestsStore}, QueryFrontend: {QueryFrontendTripperware}, @@ -828,12 +840,13 @@ func (t *Cortex) setupModuleManager() error { TableManager: {API}, Ruler: {Overrides, DistributorService, Store, StoreQueryable, RulerStorage}, Configs: {API}, - AlertManager: {API}, + AlertManager: {API, MemberlistKV}, Compactor: {API, MemberlistKV}, StoreGateway: {API, Overrides, MemberlistKV}, ChunksPurger: {Store, DeleteRequestsStore, API}, BlocksPurger: {Store, API}, Purger: {ChunksPurger, BlocksPurger}, + TenantFederation: {Queryable}, All: {QueryFrontend, Querier, Ingester, Distributor, TableManager, Purger, StoreGateway, Ruler}, } for mod, targets := range deps { diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/runtime_config.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/runtime_config.go index 41916d53eca..eea655b0817 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/runtime_config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/runtime_config.go @@ -1,15 +1,22 @@ package cortex import ( + "errors" "io" + "net/http" "gopkg.in/yaml.v2" "github.com/cortexproject/cortex/pkg/ring/kv" + "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/runtimeconfig" 
"github.com/cortexproject/cortex/pkg/util/validation" ) +var ( + errMultipleDocuments = errors.New("the provided runtime configuration contains multiple documents") +) + // runtimeConfigValues are values that can be reloaded from configuration file while Cortex is running. // Reloading is done by runtime_config.Manager, which also keeps the currently loaded config. // These values are then pushed to the components that are interested in them. @@ -24,10 +31,17 @@ func loadRuntimeConfig(r io.Reader) (interface{}, error) { decoder := yaml.NewDecoder(r) decoder.SetStrict(true) - if err := decoder.Decode(&overrides); err != nil { + + // Decode the first document. An empty document (EOF) is OK. + if err := decoder.Decode(&overrides); err != nil && !errors.Is(err, io.EOF) { return nil, err } + // Ensure the provided YAML config is not composed of multiple documents, + if err := decoder.Decode(&runtimeConfigValues{}); !errors.Is(err, io.EOF) { + return nil, errMultipleDocuments + } + return overrides, nil } @@ -71,3 +85,48 @@ func multiClientRuntimeConfigChannel(manager *runtimeconfig.Manager) func() <-ch return outCh } } +func runtimeConfigHandler(runtimeCfgManager *runtimeconfig.Manager, defaultLimits validation.Limits) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + cfg, ok := runtimeCfgManager.GetConfig().(*runtimeConfigValues) + if !ok || cfg == nil { + util.WriteTextResponse(w, "runtime config file doesn't exist") + return + } + + var output interface{} + switch r.URL.Query().Get("mode") { + case "diff": + // Default runtime config is just empty struct, but to make diff work, + // we set defaultLimits for every tenant that exists in runtime config. + defaultCfg := runtimeConfigValues{} + defaultCfg.TenantLimits = map[string]*validation.Limits{} + for k, v := range cfg.TenantLimits { + if v != nil { + defaultCfg.TenantLimits[k] = &defaultLimits + } + } + + cfgYaml, err := util.YAMLMarshalUnmarshal(cfg) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + defaultCfgYaml, err := util.YAMLMarshalUnmarshal(defaultCfg) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + output, err = util.DiffConfig(defaultCfgYaml, cfgYaml) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + return + } + + default: + output = cfg + } + util.WriteYAMLResponse(w, output) + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go b/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go index 7a565fbfb01..850aedcd32b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go +++ b/vendor/github.com/cortexproject/cortex/pkg/cortex/server_service.go @@ -7,7 +7,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/weaveworks/common/server" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -47,7 +47,7 @@ func NewServerService(serv *server.Server, servicesToWaitFor func() []services.S // if not closed yet, wait until server stops. 
<-serverDone - level.Info(util.Logger).Log("msg", "server stopped") + level.Info(util_log.Logger).Log("msg", "server stopped") return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go index d1445c801a7..a68834caa46 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor.go @@ -9,7 +9,8 @@ import ( "strings" "time" - opentracing "github.com/opentracing/opentracing-go" + "github.com/go-kit/kit/log/level" + "github.com/opentracing/opentracing-go" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -21,7 +22,6 @@ import ( "github.com/weaveworks/common/instrument" "github.com/weaveworks/common/user" - "github.com/cortexproject/cortex/pkg/ingester/client" ingester_client "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/prom1/storage/metric" "github.com/cortexproject/cortex/pkg/ring" @@ -30,6 +30,8 @@ import ( "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/extract" "github.com/cortexproject/cortex/pkg/util/limiter" + "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/math" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -113,14 +115,14 @@ var ( // Validation errors. errInvalidShardingStrategy = errors.New("invalid sharding strategy") errInvalidTenantShardSize = errors.New("invalid tenant shard size, the value must be greater than 0") + + inactiveUserTimeout = 15 * time.Minute + metricsCleanupInterval = inactiveUserTimeout / 5 ) const ( typeSamples = "samples" typeMetadata = "metadata" - - // Supported sharding strategies. - ) // Distributor is a storage.SampleAppender and a client.Querier which @@ -146,6 +148,8 @@ type Distributor struct { // Manager for subservices (HA Tracker, distributor ring and client pool) subservices *services.Manager subservicesWatcher *services.FailureWatcher + + activeUsers *util.ActiveUsers } // Config contains the configuration require to @@ -161,6 +165,7 @@ type Config struct { ShardingStrategy string `yaml:"sharding_strategy"` ShardByAllLabels bool `yaml:"shard_by_all_labels"` + ExtendWrites bool `yaml:"extend_writes"` // Distributors ring DistributorRing RingConfig `yaml:"ring"` @@ -187,6 +192,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.ExtraQueryDelay, "distributor.extra-query-delay", 0, "Time to wait before sending more than the minimum successful query requests.") f.BoolVar(&cfg.ShardByAllLabels, "distributor.shard-by-all-labels", false, "Distribute samples based on all labels, as opposed to solely by user and metric name.") f.StringVar(&cfg.ShardingStrategy, "distributor.sharding-strategy", util.ShardingStrategyDefault, fmt.Sprintf("The sharding strategy to use. Supported values are: %s.", strings.Join(supportedShardingStrategies, ", "))) + f.BoolVar(&cfg.ExtendWrites, "distributor.extend-writes", true, "Try writing to an additional ingester in the presence of an ingester not in the ACTIVE state. 
It is useful to disable this along with -ingester.unregister-on-shutdown=false in order to not spread samples to extra ingesters during rolling restarts with consistent naming.") } // Validate config and returns error on failure @@ -213,7 +219,7 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove replicationFactor.Set(float64(ingestersRing.ReplicationFactor())) cfg.PoolConfig.RemoteTimeout = cfg.RemoteTimeout - replicas, err := newClusterTracker(cfg.HATrackerConfig, reg) + replicas, err := newClusterTracker(cfg.HATrackerConfig, limits, reg) if err != nil { return nil, err } @@ -245,11 +251,12 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove d := &Distributor{ cfg: cfg, ingestersRing: ingestersRing, - ingesterPool: NewPool(cfg.PoolConfig, ingestersRing, cfg.IngesterClientFactory, util.Logger), + ingesterPool: NewPool(cfg.PoolConfig, ingestersRing, cfg.IngesterClientFactory, log.Logger), distributorsRing: distributorsRing, limits: limits, ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second), HATracker: replicas, + activeUsers: util.NewActiveUsers(), } subservices = append(subservices, d.ingesterPool) @@ -270,20 +277,49 @@ func (d *Distributor) starting(ctx context.Context) error { } func (d *Distributor) running(ctx context.Context) error { - select { - case <-ctx.Done(): - return nil - case err := <-d.subservicesWatcher.Chan(): - return errors.Wrap(err, "distributor subservice failed") + metricsCleanupTimer := time.NewTicker(metricsCleanupInterval) + defer metricsCleanupTimer.Stop() + + for { + select { + case <-metricsCleanupTimer.C: + inactiveUsers := d.activeUsers.PurgeInactiveUsers(time.Now().Add(-inactiveUserTimeout).UnixNano()) + for _, userID := range inactiveUsers { + cleanupMetricsForUser(userID) + } + continue + + case <-ctx.Done(): + return nil + + case err := <-d.subservicesWatcher.Chan(): + return errors.Wrap(err, "distributor subservice failed") + } } } +func cleanupMetricsForUser(userID string) { + receivedSamples.DeleteLabelValues(userID) + receivedMetadata.DeleteLabelValues(userID) + incomingSamples.DeleteLabelValues(userID) + incomingMetadata.DeleteLabelValues(userID) + nonHASamples.DeleteLabelValues(userID) + latestSeenSampleTimestampPerUser.DeleteLabelValues(userID) + + if err := util.DeleteMatchingLabels(dedupedSamples, map[string]string{"user": userID}); err != nil { + level.Warn(log.Logger).Log("msg", "failed to remove cortex_distributor_deduped_samples_total metric for user", "user", userID, "err", err) + } + + validation.DeletePerUserValidationMetrics(userID, log.Logger) + cleanupHATrackerMetricsForUser(userID, log.Logger) +} + // Called after distributor is asked to stop via StopAsync. 
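The new running loop and cleanupMetricsForUser above form a cardinality guard: record when each tenant last pushed, and on a timer drop the per-tenant series of tenants idle past the timeout. A condensed sketch of the combined pattern; activeUsers below is a simplified stand-in for the Cortex util.ActiveUsers type:

package example

import (
	"context"
	"sync"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var receivedSamples = prometheus.NewCounterVec(
	prometheus.CounterOpts{Name: "example_received_samples_total", Help: "Samples received per user."},
	[]string{"user"},
)

// activeUsers records the last write per tenant.
type activeUsers struct {
	mu   sync.Mutex
	seen map[string]int64 // userID -> last-seen, unix nanos
}

func (a *activeUsers) touch(user string, now int64) {
	a.mu.Lock()
	a.seen[user] = now
	a.mu.Unlock()
}

func (a *activeUsers) purgeBefore(deadline int64) []string {
	a.mu.Lock()
	defer a.mu.Unlock()
	var purged []string
	for u, ts := range a.seen {
		if ts < deadline {
			delete(a.seen, u)
			purged = append(purged, u)
		}
	}
	return purged
}

// run ticks at a fraction of the inactivity timeout and deletes the metrics
// of tenants that have gone quiet, so label cardinality stays bounded.
func run(ctx context.Context, users *activeUsers, timeout time.Duration) {
	t := time.NewTicker(timeout / 5)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			for _, u := range users.purgeBefore(time.Now().Add(-timeout).UnixNano()) {
				receivedSamples.DeleteLabelValues(u)
			}
		case <-ctx.Done():
			return
		}
	}
}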
func (d *Distributor) stopping(_ error) error { return services.StopManagerAndAwaitStopped(context.Background(), d.subservices) } -func (d *Distributor) tokenForLabels(userID string, labels []client.LabelAdapter) (uint32, error) { +func (d *Distributor) tokenForLabels(userID string, labels []ingester_client.LabelAdapter) (uint32, error) { if d.cfg.ShardByAllLabels { return shardByAllLabels(userID, labels), nil } @@ -305,28 +341,28 @@ func (d *Distributor) tokenForMetadata(userID string, metricName string) uint32 func shardByMetricName(userID string, metricName string) uint32 { h := shardByUser(userID) - h = client.HashAdd32(h, metricName) + h = ingester_client.HashAdd32(h, metricName) return h } func shardByUser(userID string) uint32 { - h := client.HashNew32() - h = client.HashAdd32(h, userID) + h := ingester_client.HashNew32() + h = ingester_client.HashAdd32(h, userID) return h } // This function generates different values for different order of same labels. -func shardByAllLabels(userID string, labels []client.LabelAdapter) uint32 { +func shardByAllLabels(userID string, labels []ingester_client.LabelAdapter) uint32 { h := shardByUser(userID) for _, label := range labels { - h = client.HashAdd32(h, label.Name) - h = client.HashAdd32(h, label.Value) + h = ingester_client.HashAdd32(h, label.Name) + h = ingester_client.HashAdd32(h, label.Value) } return h } // Remove the label labelname from a slice of LabelPairs if it exists. -func removeLabel(labelName string, labels *[]client.LabelAdapter) { +func removeLabel(labelName string, labels *[]ingester_client.LabelAdapter) { for i := 0; i < len(*labels); i++ { pair := (*labels)[i] if pair.Name == labelName { @@ -339,13 +375,18 @@ func removeLabel(labelName string, labels *[]client.LabelAdapter) { // Returns a boolean that indicates whether or not we want to remove the replica label going forward, // and an error that indicates whether we want to accept samples based on the cluster/replica found in ts. // nil for the error means accept the sample. -func (d *Distributor) checkSample(ctx context.Context, userID, cluster, replica string) (bool, error) { +func (d *Distributor) checkSample(ctx context.Context, userID, cluster, replica string) (removeReplicaLabel bool, _ error) { // If the sample doesn't have either HA label, accept it. // At the moment we want to accept these samples by default. if cluster == "" || replica == "" { return false, nil } + // If replica label is too long, don't use it. We accept the sample here, but it will fail validation later anyway. + if len(replica) > d.limits.MaxLabelValueLength(userID) { + return false, nil + } + // At this point we know we have both HA labels, we should lookup // the cluster/instance here to see if we want to accept this sample. err := d.HATracker.checkReplica(ctx, userID, cluster, replica) @@ -359,14 +400,14 @@ func (d *Distributor) checkSample(ctx context.Context, userID, cluster, replica // Validates a single series from a write request. Will remove labels if // any are configured to be dropped for the user ID. // Returns the validated series with it's labels/samples, and any error. 
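shardByUser and shardByAllLabels above fold the tenant ID and every label name/value into one incremental 32-bit hash, which is why the comment warns that label order changes the result. A self-contained sketch assuming the HashNew32/HashAdd32 helpers are 32-bit FNV-1a (offset basis 2166136261, prime 16777619); the exact variant lives in the ingester client package:

package example

const (
	offset32 = 2166136261
	prime32  = 16777619
)

func hashNew32() uint32 { return offset32 }

// hashAdd32 folds s into h one byte at a time, FNV-1a style.
func hashAdd32(h uint32, s string) uint32 {
	for i := 0; i < len(s); i++ {
		h ^= uint32(s[i])
		h *= prime32
	}
	return h
}

type labelAdapter struct{ Name, Value string }

// shardByAllLabels mirrors the function above: the same labels in a
// different order produce a different token.
func shardByAllLabels(userID string, labels []labelAdapter) uint32 {
	h := hashAdd32(hashNew32(), userID)
	for _, l := range labels {
		h = hashAdd32(h, l.Name)
		h = hashAdd32(h, l.Value)
	}
	return h
}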
-func (d *Distributor) validateSeries(ts ingester_client.PreallocTimeseries, userID string) (client.PreallocTimeseries, error) { +func (d *Distributor) validateSeries(ts ingester_client.PreallocTimeseries, userID string, skipLabelNameValidation bool) (ingester_client.PreallocTimeseries, error) { labelsHistogram.Observe(float64(len(ts.Labels))) - if err := validation.ValidateLabels(d.limits, userID, ts.Labels, d.cfg.SkipLabelNameValidation); err != nil { + if err := validation.ValidateLabels(d.limits, userID, ts.Labels, skipLabelNameValidation); err != nil { return emptyPreallocSeries, err } metricName, _ := extract.MetricNameFromLabelAdapters(ts.Labels) - samples := make([]client.Sample, 0, len(ts.Samples)) + samples := make([]ingester_client.Sample, 0, len(ts.Samples)) for _, s := range ts.Samples { if err := validation.ValidateSample(d.limits, userID, metricName, s); err != nil { return emptyPreallocSeries, err @@ -374,8 +415,8 @@ func (d *Distributor) validateSeries(ts ingester_client.PreallocTimeseries, user samples = append(samples, s) } - return client.PreallocTimeseries{ - TimeSeries: &client.TimeSeries{ + return ingester_client.PreallocTimeseries{ + TimeSeries: &ingester_client.TimeSeries{ Labels: ts.Labels, Samples: samples, }, @@ -384,11 +425,15 @@ func (d *Distributor) validateSeries(ts ingester_client.PreallocTimeseries, user } // Push implements client.IngesterServer -func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*client.WriteResponse, error) { +func (d *Distributor) Push(ctx context.Context, req *ingester_client.WriteRequest) (*ingester_client.WriteResponse, error) { userID, err := tenant.TenantID(ctx) if err != nil { return nil, err } + + now := time.Now() + d.activeUsers.UpdateUserTimestamp(userID, now.UnixNano()) + source := util.GetSourceIPsFromOutgoingCtx(ctx) var firstPartialErr error @@ -406,8 +451,8 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie // A WriteRequest can only contain series or metadata but not both. This might change in the future. // For each timeseries or samples, we compute a hash to distribute across ingesters; // check each sample/metadata and discard if outside limits. - validatedTimeseries := make([]client.PreallocTimeseries, 0, len(req.Timeseries)) - validatedMetadata := make([]*client.MetricMetadata, 0, len(req.Metadata)) + validatedTimeseries := make([]ingester_client.PreallocTimeseries, 0, len(req.Timeseries)) + validatedMetadata := make([]*ingester_client.MetricMetadata, 0, len(req.Metadata)) metadataKeys := make([]uint32, 0, len(req.Metadata)) seriesKeys := make([]uint32, 0, len(req.Timeseries)) validatedSamples := 0 @@ -416,13 +461,19 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie cluster, replica := findHALabels(d.limits.HAReplicaLabel(userID), d.limits.HAClusterLabel(userID), req.Timeseries[0].Labels) removeReplica, err = d.checkSample(ctx, userID, cluster, replica) if err != nil { - if resp, ok := httpgrpc.HTTPResponseFromError(err); ok && resp.GetCode() == 202 { + // Ensure the request slice is reused if the series get deduped. + ingester_client.ReuseSlice(req.Timeseries) + + if errors.Is(err, replicasNotMatchError{}) { // These samples have been deduped. dedupedSamples.WithLabelValues(userID, cluster).Add(float64(numSamples)) + return nil, httpgrpc.Errorf(http.StatusAccepted, err.Error()) } - // Ensure the request slice is reused if the series get deduped. 
- client.ReuseSlice(req.Timeseries) + if errors.Is(err, tooManyClustersError{}) { + validation.DiscardedSamples.WithLabelValues(validation.TooManyHAClusters, userID).Add(float64(numSamples)) + return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) + } return nil, err } @@ -445,12 +496,12 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie for _, ts := range req.Timeseries { // Use timestamp of latest sample in the series. If samples for series are not ordered, metric for user may be wrong. if len(ts.Samples) > 0 { - latestSampleTimestampMs = util.Max64(latestSampleTimestampMs, ts.Samples[len(ts.Samples)-1].TimestampMs) + latestSampleTimestampMs = math.Max64(latestSampleTimestampMs, ts.Samples[len(ts.Samples)-1].TimestampMs) } if mrc := d.limits.MetricRelabelConfigs(userID); len(mrc) > 0 { - l := relabel.Process(client.FromLabelAdaptersToLabels(ts.Labels), mrc...) - ts.Labels = client.FromLabelsToLabelAdapters(l) + l := relabel.Process(ingester_client.FromLabelAdaptersToLabels(ts.Labels), mrc...) + ts.Labels = ingester_client.FromLabelsToLabelAdapters(l) } // If we found both the cluster and replica labels, we only want to include the cluster label when @@ -482,7 +533,8 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie return nil, err } - validatedSeries, err := d.validateSeries(ts, userID) + skipLabelNameValidation := d.cfg.SkipLabelNameValidation || req.GetSkipLabelNameValidation() + validatedSeries, err := d.validateSeries(ts, userID, skipLabelNameValidation) // Errors in validation are considered non-fatal, as one series in a request may contain // invalid data but all the remaining series could be perfectly valid. @@ -520,16 +572,15 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie if len(seriesKeys) == 0 && len(metadataKeys) == 0 { // Ensure the request slice is reused if there's no series or metadata passing the validation. - client.ReuseSlice(req.Timeseries) + ingester_client.ReuseSlice(req.Timeseries) - return &client.WriteResponse{}, firstPartialErr + return &ingester_client.WriteResponse{}, firstPartialErr } - now := time.Now() totalN := validatedSamples + len(validatedMetadata) if !d.ingestionRateLimiter.AllowN(now, userID, totalN) { // Ensure the request slice is reused if the request is rate limited. - client.ReuseSlice(req.Timeseries) + ingester_client.ReuseSlice(req.Timeseries) // Return a 4xx here to have the client discard the data and not retry. If a client // is sending too much data consistently we will unlikely ever catch up otherwise. @@ -538,7 +589,7 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie return nil, httpgrpc.Errorf(http.StatusTooManyRequests, "ingestion rate limit (%v) exceeded while adding %d samples and %d metadata", d.ingestionRateLimiter.Limit(now, userID), validatedSamples, len(validatedMetadata)) } - subRing := d.ingestersRing.(ring.ReadRing) + subRing := d.ingestersRing // Obtain a subring if required. if d.cfg.ShardingStrategy == util.ShardingStrategyShuffle { @@ -548,9 +599,14 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie keys := append(seriesKeys, metadataKeys...) 
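The AllowN call above is a token-bucket admission check: validated samples plus metadata are charged as one batch against the tenant's ingestion rate, and an over-limit batch is rejected with 429 so clients drop the data rather than retry. Cortex uses its own multi-tenant limiter, but the check behaves like golang.org/x/time/rate; the numbers below are illustrative:

package main

import (
	"fmt"
	"net/http"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 10 000 samples/s sustained, bursts of up to 100 000.
	lim := rate.NewLimiter(rate.Limit(10000), 100000)

	totalN := 250000 // validated samples + metadata in one push
	if !lim.AllowN(time.Now(), totalN) {
		// Batch exceeds the burst, so it can never be admitted: reject it
		// outright instead of queueing work the tenant cannot catch up on.
		fmt.Println(http.StatusTooManyRequests, "ingestion rate limit exceeded")
	}
}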
initialMetadataIndex := len(seriesKeys) - err = ring.DoBatch(ctx, subRing, keys, func(ingester ring.IngesterDesc, indexes []int) error { - timeseries := make([]client.PreallocTimeseries, 0, len(indexes)) - var metadata []*client.MetricMetadata + op := ring.WriteNoExtend + if d.cfg.ExtendWrites { + op = ring.Write + } + + err = ring.DoBatch(ctx, op, subRing, keys, func(ingester ring.InstanceDesc, indexes []int) error { + timeseries := make([]ingester_client.PreallocTimeseries, 0, len(indexes)) + var metadata []*ingester_client.MetricMetadata for _, i := range indexes { if i >= initialMetadataIndex { @@ -572,14 +628,14 @@ func (d *Distributor) Push(ctx context.Context, req *client.WriteRequest) (*clie localCtx = util.AddSourceIPsToOutgoingContext(localCtx, source) return d.send(localCtx, ingester, timeseries, metadata, req.Source) - }, func() { client.ReuseSlice(req.Timeseries) }) + }, func() { ingester_client.ReuseSlice(req.Timeseries) }) if err != nil { return nil, err } - return &client.WriteResponse{}, firstPartialErr + return &ingester_client.WriteResponse{}, firstPartialErr } -func sortLabelsIfNeeded(labels []client.LabelAdapter) { +func sortLabelsIfNeeded(labels []ingester_client.LabelAdapter) { // no need to run sort.Slice, if labels are already sorted, which is most of the time. // we can avoid extra memory allocations (mostly interface-related) this way. sorted := true @@ -601,14 +657,14 @@ func sortLabelsIfNeeded(labels []client.LabelAdapter) { }) } -func (d *Distributor) send(ctx context.Context, ingester ring.IngesterDesc, timeseries []client.PreallocTimeseries, metadata []*client.MetricMetadata, source client.WriteRequest_SourceEnum) error { +func (d *Distributor) send(ctx context.Context, ingester ring.InstanceDesc, timeseries []ingester_client.PreallocTimeseries, metadata []*ingester_client.MetricMetadata, source ingester_client.WriteRequest_SourceEnum) error { h, err := d.ingesterPool.GetClientFor(ingester.Addr) if err != nil { return err } c := h.(ingester_client.IngesterClient) - req := client.WriteRequest{ + req := ingester_client.WriteRequest{ Timeseries: timeseries, Metadata: metadata, Source: source, @@ -632,8 +688,8 @@ func (d *Distributor) send(ctx context.Context, ingester ring.IngesterDesc, time } // ForReplicationSet runs f, in parallel, for all ingesters in the input replication set. 
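sortLabelsIfNeeded above scans once for the already-sorted common case so that sort.Slice, with its closure and interface allocations, is only invoked when labels actually arrive out of order. The same idiom on a plain string slice:

package example

import "sort"

func sortIfNeeded(xs []string) {
	// Fast path: one linear scan, no allocations.
	sorted := true
	for i := 1; i < len(xs); i++ {
		if xs[i-1] > xs[i] {
			sorted = false
			break
		}
	}
	if sorted {
		return
	}
	// Slow path only when needed.
	sort.Slice(xs, func(i, j int) bool { return xs[i] < xs[j] })
}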
-func (d *Distributor) ForReplicationSet(ctx context.Context, replicationSet ring.ReplicationSet, f func(context.Context, client.IngesterClient) (interface{}, error)) ([]interface{}, error) { - return replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.IngesterDesc) (interface{}, error) { +func (d *Distributor) ForReplicationSet(ctx context.Context, replicationSet ring.ReplicationSet, f func(context.Context, ingester_client.IngesterClient) (interface{}, error)) ([]interface{}, error) { + return replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { client, err := d.ingesterPool.GetClientFor(ing.Addr) if err != nil { return nil, err @@ -650,12 +706,12 @@ func (d *Distributor) LabelValuesForLabelName(ctx context.Context, from, to mode return nil, err } - req := &client.LabelValuesRequest{ + req := &ingester_client.LabelValuesRequest{ LabelName: string(labelName), StartTimestampMs: int64(from), EndTimestampMs: int64(to), } - resps, err := d.ForReplicationSet(ctx, replicationSet, func(ctx context.Context, client client.IngesterClient) (interface{}, error) { + resps, err := d.ForReplicationSet(ctx, replicationSet, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { return client.LabelValues(ctx, req) }) if err != nil { @@ -664,7 +720,7 @@ func (d *Distributor) LabelValuesForLabelName(ctx context.Context, from, to mode valueSet := map[string]struct{}{} for _, resp := range resps { - for _, v := range resp.(*client.LabelValuesResponse).LabelValues { + for _, v := range resp.(*ingester_client.LabelValuesResponse).LabelValues { valueSet[v] = struct{}{} } } @@ -687,11 +743,11 @@ func (d *Distributor) LabelNames(ctx context.Context, from, to model.Time) ([]st return nil, err } - req := &client.LabelNamesRequest{ + req := &ingester_client.LabelNamesRequest{ StartTimestampMs: int64(from), EndTimestampMs: int64(to), } - resps, err := d.ForReplicationSet(ctx, replicationSet, func(ctx context.Context, client client.IngesterClient) (interface{}, error) { + resps, err := d.ForReplicationSet(ctx, replicationSet, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { return client.LabelNames(ctx, req) }) if err != nil { @@ -700,7 +756,7 @@ func (d *Distributor) LabelNames(ctx context.Context, from, to model.Time) ([]st valueSet := map[string]struct{}{} for _, resp := range resps { - for _, v := range resp.(*client.LabelNamesResponse).LabelNames { + for _, v := range resp.(*ingester_client.LabelNamesResponse).LabelNames { valueSet[v] = struct{}{} } } @@ -727,7 +783,7 @@ func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through return nil, err } - resps, err := d.ForReplicationSet(ctx, replicationSet, func(ctx context.Context, client client.IngesterClient) (interface{}, error) { + resps, err := d.ForReplicationSet(ctx, replicationSet, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { return client.MetricsForLabelMatchers(ctx, req) }) if err != nil { @@ -736,7 +792,7 @@ func (d *Distributor) MetricsForLabelMatchers(ctx context.Context, from, through metrics := map[model.Fingerprint]model.Metric{} for _, resp := range resps { - ms := ingester_client.FromMetricsForLabelMatchersResponse(resp.(*client.MetricsForLabelMatchersResponse)) + ms := ingester_client.FromMetricsForLabelMatchersResponse(resp.(*ingester_client.MetricsForLabelMatchersResponse)) for _, m := range ms { metrics[m.Fingerprint()] 
= m } @@ -760,7 +816,7 @@ func (d *Distributor) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetad req := &ingester_client.MetricsMetadataRequest{} // TODO(gotjosh): We only need to look in all the ingesters if shardByAllLabels is enabled. - resps, err := d.ForReplicationSet(ctx, replicationSet, func(ctx context.Context, client client.IngesterClient) (interface{}, error) { + resps, err := d.ForReplicationSet(ctx, replicationSet, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { return client.MetricsMetadata(ctx, req) }) if err != nil { @@ -770,7 +826,7 @@ func (d *Distributor) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetad result := []scrape.MetricMetadata{} dedupTracker := map[ingester_client.MetricMetadata]struct{}{} for _, resp := range resps { - r := resp.(*client.MetricsMetadataResponse) + r := resp.(*ingester_client.MetricsMetadataResponse) for _, m := range r.Metadata { // Given we look across all ingesters - dedup the metadata. _, ok := dedupTracker[*m] @@ -783,7 +839,7 @@ func (d *Distributor) MetricsMetadata(ctx context.Context) ([]scrape.MetricMetad Metric: m.MetricFamilyName, Help: m.Help, Unit: m.Unit, - Type: client.MetricMetadataMetricTypeToMetricType(m.GetType()), + Type: ingester_client.MetricMetadataMetricTypeToMetricType(m.GetType()), }) } } @@ -801,8 +857,8 @@ func (d *Distributor) UserStats(ctx context.Context) (*UserStats, error) { // Make sure we get a successful response from all of them. replicationSet.MaxErrors = 0 - req := &client.UserStatsRequest{} - resps, err := d.ForReplicationSet(ctx, replicationSet, func(ctx context.Context, client client.IngesterClient) (interface{}, error) { + req := &ingester_client.UserStatsRequest{} + resps, err := d.ForReplicationSet(ctx, replicationSet, func(ctx context.Context, client ingester_client.IngesterClient) (interface{}, error) { return client.UserStats(ctx, req) }) if err != nil { @@ -811,7 +867,7 @@ func (d *Distributor) UserStats(ctx context.Context) (*UserStats, error) { totalStats := &UserStats{} for _, resp := range resps { - r := resp.(*client.UserStatsResponse) + r := resp.(*ingester_client.UserStatsResponse) totalStats.IngestionRate += r.IngestionRate totalStats.APIIngestionRate += r.ApiIngestionRate totalStats.RuleIngestionRate += r.RuleIngestionRate @@ -836,7 +892,7 @@ func (d *Distributor) AllUserStats(ctx context.Context) ([]UserIDStats, error) { // Add up by user, across all responses from ingesters perUserTotals := make(map[string]UserStats) - req := &client.UserStatsRequest{} + req := &ingester_client.UserStatsRequest{} ctx = user.InjectOrgID(ctx, "1") // fake: ingester insists on having an org ID // Not using d.ForReplicationSet(), so we can fail after first error. 
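MetricsMetadata above merges the responses of every ingester in the replication set and deduplicates them with a map keyed by the metadata value itself, which works because the protobuf-generated struct is comparable. A reduced sketch; metricMetadata below is a stand-in for the generated type:

package main

import "fmt"

type metricMetadata struct {
	MetricFamilyName, Help, Unit string
}

// dedup keeps the first occurrence of each metadata entry across replicas.
func dedup(responses [][]metricMetadata) []metricMetadata {
	seen := map[metricMetadata]struct{}{}
	var out []metricMetadata
	for _, resp := range responses {
		for _, m := range resp {
			if _, ok := seen[m]; ok {
				continue // already reported by another ingester
			}
			seen[m] = struct{}{}
			out = append(out, m)
		}
	}
	return out
}

func main() {
	a := []metricMetadata{{"up", "target up", ""}}
	b := []metricMetadata{{"up", "target up", ""}} // replica of the same data
	fmt.Println(len(dedup([][]metricMetadata{a, b}))) // 1
}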
replicationSet, err := d.ingestersRing.GetAllHealthy(ring.Read) diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go index af77a5d98bc..78296341f78 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/distributor_ring.go @@ -9,8 +9,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // RingConfig masks the ring lifecycler config which contains @@ -36,7 +36,7 @@ type RingConfig struct { func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { hostname, err := os.Hostname() if err != nil { - level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err) os.Exit(1) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go index d6cbe3b252d..c7ebba194f0 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/ha_tracker.go @@ -6,7 +6,6 @@ import ( "flag" "fmt" "math/rand" - "net/http" "strings" "sync" "time" @@ -17,13 +16,13 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/prometheus/pkg/timestamp" - "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/mtime" "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ring/kv" "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -54,6 +53,12 @@ var ( errInvalidFailoverTimeout = "HA Tracker failover timeout (%v) must be at least 1s greater than update timeout - max jitter (%v)" ) +type haTrackerLimits interface { + // Returns max number of clusters that HA tracker should track for a user. + // Samples from additional clusters are rejected. + MaxHAClusters(user string) int +} + // ProtoReplicaDescFactory makes new InstanceDescs func ProtoReplicaDescFactory() proto.Message { return NewReplicaDesc() @@ -73,10 +78,11 @@ type haTracker struct { cfg HATrackerConfig client kv.Client updateTimeoutJitter time.Duration + limits haTrackerLimits - // Replicas we are accepting samples from. electedLock sync.RWMutex - elected map[string]ReplicaDesc + elected map[string]ReplicaDesc // Replicas we are accepting samples from. Key = "user/cluster". + clusters map[string]int // Number of clusters with elected replicas that a single user has. Key = user. } // HATrackerConfig contains the configuration require to @@ -143,17 +149,19 @@ func GetReplicaDescCodec() codec.Proto { // NewClusterTracker returns a new HA cluster tracker using either Consul // or in-memory KV store. Tracker must be started via StartAsync(). 
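The new haTrackerLimits interface (declared earlier in this hunk) is deliberately narrow: the tracker asks only for MaxHAClusters, so the large production overrides type satisfies it incidentally and tests can fake it in two lines. The idiom in miniature, with illustrative names:

package example

// clusterLimits is the one-method view the tracker needs; any type with this
// method satisfies it, including a much larger limits/overrides type.
type clusterLimits interface {
	MaxHAClusters(user string) int
}

type tracker struct{ limits clusterLimits }

// withinLimit mirrors the check in checkReplica: a limit of zero or less
// means unlimited clusters.
func (t *tracker) withinLimit(user string, current int) bool {
	limit := t.limits.MaxHAClusters(user)
	return limit <= 0 || current < limit
}

// fakeLimits shows how small a test double can be.
type fakeLimits int

func (f fakeLimits) MaxHAClusters(string) int { return int(f) }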
-func newClusterTracker(cfg HATrackerConfig, reg prometheus.Registerer) (*haTracker, error) {
+func newClusterTracker(cfg HATrackerConfig, limits haTrackerLimits, reg prometheus.Registerer) (*haTracker, error) {
 	var jitter time.Duration
 	if cfg.UpdateTimeoutJitterMax > 0 {
 		jitter = time.Duration(rand.Int63n(int64(2*cfg.UpdateTimeoutJitterMax))) - cfg.UpdateTimeoutJitterMax
 	}

 	t := &haTracker{
-		logger:              util.Logger,
+		logger:              util_log.Logger,
 		cfg:                 cfg,
 		updateTimeoutJitter: jitter,
+		limits:              limits,
 		elected:             map[string]ReplicaDesc{},
+		clusters:            map[string]int{},
 	}

 	if cfg.EnableHATracker {
@@ -186,19 +194,25 @@ func (c *haTracker) loop(ctx context.Context) error {
 		replica := value.(*ReplicaDesc)
 		c.electedLock.Lock()
 		defer c.electedLock.Unlock()
-		chunks := strings.SplitN(key, "/", 2)
+		segments := strings.SplitN(key, "/", 2)

-		// The prefix has already been stripped, so a valid key would look like cluster/replica,
-		// and a key without a / such as `ring` would be invalid.
-		if len(chunks) != 2 {
+		// Valid key would look like cluster/replica, and a key without a / such as `ring` would be invalid.
+		if len(segments) != 2 {
 			return true
 		}

-		if replica.Replica != c.elected[key].Replica {
-			electedReplicaChanges.WithLabelValues(chunks[0], chunks[1]).Inc()
+		user := segments[0]
+		cluster := segments[1]
+
+		elected, exists := c.elected[key]
+		if replica.Replica != elected.Replica {
+			electedReplicaChanges.WithLabelValues(user, cluster).Inc()
+		}
+		if !exists {
+			c.clusters[user]++
 		}
 		c.elected[key] = *replica
-		electedReplicaTimestamp.WithLabelValues(chunks[0], chunks[1]).Set(float64(replica.ReceivedAt / 1000))
+		electedReplicaTimestamp.WithLabelValues(user, cluster).Set(float64(replica.ReceivedAt / 1000))
 		electedReplicaPropagationTime.Observe(time.Since(timestamp.Time(replica.ReceivedAt)).Seconds())
 		return true
 	})
@@ -210,7 +224,7 @@ func (c *haTracker) loop(ctx context.Context) error {
 // tracker c to see if we should accept the incomming sample. It will return an error if the sample
 // should not be accepted. Note that internally this function does checks against the stored values
 // and may modify the stored data, for example to failover between replicas after a certain period of time.
-// A 202 response code is returned (from checkKVstore) if we shouldn't store this sample but are
+// replicasNotMatchError is returned (from checkKVStore) if we shouldn't store this sample but are
 // accepting samples from another replica for the cluster, so that there isn't a bunch of error's returned
 // to customers clients.
 func (c *haTracker) checkReplica(ctx context.Context, userID, cluster, replica string) error {
@@ -220,23 +234,33 @@ func (c *haTracker) checkReplica(ctx context.Context, userID, cluster, replica s
 	}
 	key := fmt.Sprintf("%s/%s", userID, cluster)
 	now := mtime.Now()
+
 	c.electedLock.RLock()
 	entry, ok := c.elected[key]
+	clusters := c.clusters[userID]
 	c.electedLock.RUnlock()
+
 	if ok && now.Sub(timestamp.Time(entry.ReceivedAt)) < c.cfg.UpdateTimeout+c.updateTimeoutJitter {
 		if entry.Replica != replica {
-			return replicasNotMatchError(replica, entry.Replica)
+			return replicasNotMatchError{replica: replica, elected: entry.Replica}
 		}
 		return nil
 	}

+	if !ok {
+		// If we don't know about this cluster yet and we have reached the limit for number of clusters, we error out now.
+		if limit := c.limits.MaxHAClusters(userID); limit > 0 && clusters+1 > limit {
+			return tooManyClustersError{limit: limit}
+		}
+	}
+
 	err := c.checkKVStore(ctx, key, replica, now)
 	kvCASCalls.WithLabelValues(userID, cluster).Inc()
 	if err != nil {
-		// The callback within checkKVStore will return a 202 if the sample is being deduped,
+		// The callback within checkKVStore will return a replicasNotMatchError if the sample is being deduped,
 		// otherwise there may have been an actual error CAS'ing that we should log.
-		if resp, ok := httpgrpc.HTTPResponseFromError(err); ok && resp.GetCode() != 202 {
-			level.Error(util.Logger).Log("msg", "rejecting sample", "err", err)
+		if !errors.Is(err, replicasNotMatchError{}) {
+			level.Error(util_log.Logger).Log("msg", "rejecting sample", "err", err)
 		}
 	}
 	return err
@@ -253,10 +277,9 @@ func (c *haTracker) checkKVStore(ctx context.Context, key, replica string, now t
 		}

 		// We shouldn't failover to accepting a new replica if the timestamp we've received this sample at
-		// is less than failOver timeout amount of time since the timestamp in the KV store.
+		// is less than failover timeout amount of time since the timestamp in the KV store.
 		if desc.Replica != replica && now.Sub(timestamp.Time(desc.ReceivedAt)) < c.cfg.FailoverTimeout {
-			// Return a 202.
-			return nil, false, replicasNotMatchError(replica, desc.Replica)
+			return nil, false, replicasNotMatchError{replica: replica, elected: desc.Replica}
 		}
 	}

@@ -269,8 +292,39 @@ func (c *haTracker) checkKVStore(ctx context.Context, key, replica string, now t
 	})
 }

-func replicasNotMatchError(replica, elected string) error {
-	return httpgrpc.Errorf(http.StatusAccepted, "replicas did not mach, rejecting sample: replica=%s, elected=%s", replica, elected)
+type replicasNotMatchError struct {
+	replica, elected string
+}
+
+func (e replicasNotMatchError) Error() string {
+	return fmt.Sprintf("replicas did not match, rejecting sample: replica=%s, elected=%s", e.replica, e.elected)
+}
+
+// Needed for errors.Is to work properly.
+func (e replicasNotMatchError) Is(err error) bool {
+	_, ok1 := err.(replicasNotMatchError)
+	_, ok2 := err.(*replicasNotMatchError)
+	return ok1 || ok2
+}
+
+// IsOperationAborted returns whether the error has been caused by an operation intentionally aborted.
+func (e replicasNotMatchError) IsOperationAborted() bool {
+	return true
+}
+
+type tooManyClustersError struct {
+	limit int
+}
+
+func (e tooManyClustersError) Error() string {
+	return fmt.Sprintf("too many HA clusters (limit: %d)", e.limit)
+}
+
+// Needed for errors.Is to work properly.
+func (e tooManyClustersError) Is(err error) bool { + _, ok1 := err.(tooManyClustersError) + _, ok2 := err.(*tooManyClustersError) + return ok1 || ok2 } func findHALabels(replicaLabel, clusterLabel string, labels []client.LabelAdapter) (string, string) { @@ -279,12 +333,26 @@ func findHALabels(replicaLabel, clusterLabel string, labels []client.LabelAdapte for _, pair = range labels { if pair.Name == replicaLabel { - replica = string(pair.Value) + replica = pair.Value } if pair.Name == clusterLabel { - cluster = string(pair.Value) + cluster = pair.Value } } return cluster, replica } + +func cleanupHATrackerMetricsForUser(userID string, logger log.Logger) { + filter := map[string]string{"user": userID} + + if err := util.DeleteMatchingLabels(electedReplicaChanges, filter); err != nil { + level.Warn(logger).Log("msg", "failed to remove cortex_ha_tracker_elected_replica_changes_total metric for user", "user", userID, "err", err) + } + if err := util.DeleteMatchingLabels(electedReplicaTimestamp, filter); err != nil { + level.Warn(logger).Log("msg", "failed to remove cortex_ha_tracker_elected_replica_timestamp_seconds metric for user", "user", userID, "err", err) + } + if err := util.DeleteMatchingLabels(kvCASCalls, filter); err != nil { + level.Warn(logger).Log("msg", "failed to remove cortex_ha_tracker_kv_store_cas_total metric for user", "user", userID, "err", err) + } +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go index 9628fc64323..b8a214ab98c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go +++ b/vendor/github.com/cortexproject/cortex/pkg/distributor/query.go @@ -10,7 +10,6 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/weaveworks/common/instrument" - "github.com/cortexproject/cortex/pkg/ingester/client" ingester_client "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/tenant" @@ -97,7 +96,7 @@ func (d *Distributor) GetIngestersForQuery(ctx context.Context, matchers ...*lab metricNameMatcher, _, ok := extract.MetricNameMatcherFromMatchers(matchers) if ok && metricNameMatcher.Type == labels.MatchEqual { - return d.ingestersRing.Get(shardByMetricName(userID, metricNameMatcher.Value), ring.Read, nil) + return d.ingestersRing.Get(shardByMetricName(userID, metricNameMatcher.Value), ring.Read, nil, nil, nil) } } @@ -127,10 +126,10 @@ func (d *Distributor) GetIngestersForMetadata(ctx context.Context) (ring.Replica } // queryIngesters queries the ingesters via the older, sample-based API. -func (d *Distributor) queryIngesters(ctx context.Context, replicationSet ring.ReplicationSet, req *client.QueryRequest) (model.Matrix, error) { +func (d *Distributor) queryIngesters(ctx context.Context, replicationSet ring.ReplicationSet, req *ingester_client.QueryRequest) (model.Matrix, error) { // Fetch samples from multiple ingesters in parallel, using the replicationSet // to deal with consistency. 
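The value-receiver Is methods above let errors.Is match these errors purely by type, whatever their fields hold and however deeply they are wrapped, which is what allows Push to translate them into 202 and 400 responses without string matching. A runnable demonstration of the mechanism with a stand-in error type:

package main

import (
	"errors"
	"fmt"
)

type notElectedError struct{ replica, elected string }

func (e notElectedError) Error() string {
	return fmt.Sprintf("replica %s is not the elected %s", e.replica, e.elected)
}

// Is reports a match for any notElectedError, ignoring field values.
func (e notElectedError) Is(err error) bool {
	_, ok1 := err.(notElectedError)
	_, ok2 := err.(*notElectedError)
	return ok1 || ok2
}

func main() {
	err := fmt.Errorf("push failed: %w", notElectedError{"b", "a"})
	// True even through %w wrapping and with an empty target value.
	fmt.Println(errors.Is(err, notElectedError{}))
}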
- results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.IngesterDesc) (interface{}, error) { + results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { client, err := d.ingesterPool.GetClientFor(ing.Addr) if err != nil { return nil, err @@ -173,9 +172,9 @@ func (d *Distributor) queryIngesters(ctx context.Context, replicationSet ring.Re } // queryIngesterStream queries the ingesters using the new streaming API. -func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ring.ReplicationSet, req *client.QueryRequest) (*ingester_client.QueryStreamResponse, error) { +func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ring.ReplicationSet, req *ingester_client.QueryRequest) (*ingester_client.QueryStreamResponse, error) { // Fetch samples from multiple ingesters - results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.IngesterDesc) (interface{}, error) { + results, err := replicationSet.Do(ctx, d.cfg.ExtraQueryDelay, func(ctx context.Context, ing *ring.InstanceDesc) (interface{}, error) { client, err := d.ingesterPool.GetClientFor(ing.Addr) if err != nil { return nil, err @@ -220,7 +219,7 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ri // Parse any chunk series for _, series := range response.Chunkseries { - key := client.LabelsToKeyString(client.FromLabelAdaptersToLabels(series.Labels)) + key := ingester_client.LabelsToKeyString(ingester_client.FromLabelAdaptersToLabels(series.Labels)) existing := hashToChunkseries[key] existing.Labels = series.Labels existing.Chunks = append(existing.Chunks, series.Chunks...) @@ -229,7 +228,7 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ri // Parse any time series for _, series := range response.Timeseries { - key := client.LabelsToKeyString(client.FromLabelAdaptersToLabels(series.Labels)) + key := ingester_client.LabelsToKeyString(ingester_client.FromLabelAdaptersToLabels(series.Labels)) existing := hashToTimeSeries[key] existing.Labels = series.Labels if existing.Samples == nil { @@ -242,8 +241,8 @@ func (d *Distributor) queryIngesterStream(ctx context.Context, replicationSet ri } resp := &ingester_client.QueryStreamResponse{ - Chunkseries: make([]client.TimeSeriesChunk, 0, len(hashToChunkseries)), - Timeseries: make([]client.TimeSeries, 0, len(hashToTimeSeries)), + Chunkseries: make([]ingester_client.TimeSeriesChunk, 0, len(hashToChunkseries)), + Timeseries: make([]ingester_client.TimeSeries, 0, len(hashToTimeSeries)), } for _, series := range hashToChunkseries { resp.Chunkseries = append(resp.Chunkseries, series) diff --git a/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go b/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go index 6157599a875..5f7ae6b4b73 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go +++ b/vendor/github.com/cortexproject/cortex/pkg/flusher/flusher.go @@ -5,6 +5,7 @@ import ( "flag" "time" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" @@ -41,6 +42,7 @@ type Flusher struct { chunkStore ingester.ChunkStore limits *validation.Overrides registerer prometheus.Registerer + logger log.Logger } const ( @@ -55,6 +57,7 @@ func New( chunkStore ingester.ChunkStore, limits *validation.Overrides, registerer prometheus.Registerer, + 
logger log.Logger, ) (*Flusher, error) { // These are ignored by blocks-ingester, but that's fine. @@ -68,13 +71,14 @@ func New( chunkStore: chunkStore, limits: limits, registerer: registerer, + logger: logger, } f.Service = services.NewBasicService(nil, f.running, nil) return f, nil } func (f *Flusher) running(ctx context.Context) error { - ing, err := ingester.NewForFlusher(f.ingesterConfig, f.chunkStore, f.limits, f.registerer) + ing, err := ingester.NewForFlusher(f.ingesterConfig, f.chunkStore, f.limits, f.registerer, f.logger) if err != nil { return errors.Wrap(err, "create ingester") } @@ -87,7 +91,7 @@ func (f *Flusher) running(ctx context.Context) error { // Sleeping to give a chance to Prometheus // to collect the metrics. - level.Info(util.Logger).Log("msg", "sleeping to give chance for collection of metrics", "duration", postFlushSleepTime.String()) + level.Info(f.logger).Log("msg", "sleeping to give chance for collection of metrics", "duration", postFlushSleepTime.String()) time.Sleep(postFlushSleepTime) if err := services.StopAndAwaitTerminated(ctx, ing); err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/handler.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/handler.go index a043588e89f..2b10065bcaa 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/handler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/transport/handler.go @@ -9,6 +9,7 @@ import ( "io/ioutil" "net/http" "net/url" + "strconv" "strings" "time" @@ -21,12 +22,13 @@ import ( querier_stats "github.com/cortexproject/cortex/pkg/querier/stats" "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const ( // StatusClientClosedRequest is the status code for when a client request cancellation of an http request StatusClientClosedRequest = 499 + ServiceTimingHeaderName = "Server-Timing" ) var ( @@ -45,7 +47,7 @@ type HandlerConfig struct { func (cfg *HandlerConfig) RegisterFlags(f *flag.FlagSet) { f.DurationVar(&cfg.LogQueriesLongerThan, "frontend.log-queries-longer-than", 0, "Log queries that are slower than the specified duration. Set to 0 to disable. Set to < 0 to enable on all queries.") f.Int64Var(&cfg.MaxBodySize, "frontend.max-body-size", 10*1024*1024, "Max body size for downstream prometheus.") - f.BoolVar(&cfg.QueryStatsEnabled, "frontend.query-stats-enabled", false, "True to enable query statistics tracking. When enabled, a message with some statistics is logged for every query. This configuration option must be set both on query-frontend and querier.") + f.BoolVar(&cfg.QueryStatsEnabled, "frontend.query-stats-enabled", false, "True to enable query statistics tracking. When enabled, a message with some statistics is logged for every query.") } // Handler accepts queries and forwards them to RoundTripper. It can log slow queries, @@ -114,6 +116,10 @@ func (f *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { hs[h] = vs } + if f.cfg.QueryStatsEnabled { + writeServiceTimingHeader(queryResponseTime, hs, stats) + } + w.WriteHeader(resp.StatusCode) // we don't check for copy error as there is no much we can do at this point _, _ = io.Copy(w, resp.Body) @@ -142,14 +148,15 @@ func (f *Handler) reportSlowQuery(r *http.Request, queryString url.Values, query "time_taken", queryResponseTime.String(), }, formatQueryString(queryString)...) - level.Info(util.WithContext(r.Context(), f.log)).Log(logMessage...) 
+ level.Info(util_log.WithContext(r.Context(), f.log)).Log(logMessage...) } func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, queryResponseTime time.Duration, stats *querier_stats.Stats) { - userID, err := tenant.TenantID(r.Context()) + tenantIDs, err := tenant.TenantIDs(r.Context()) if err != nil { return } + userID := tenant.JoinTenantIDs(tenantIDs) // Track stats. f.querySeconds.WithLabelValues(userID).Add(stats.LoadWallTime().Seconds()) @@ -160,10 +167,10 @@ func (f *Handler) reportQueryStats(r *http.Request, queryString url.Values, quer "method", r.Method, "path", r.URL.Path, "response_time", queryResponseTime, - "query_wall_time", stats.LoadWallTime(), + "query_wall_time_seconds", stats.LoadWallTime().Seconds(), }, formatQueryString(queryString)...) - level.Info(util.WithContext(r.Context(), f.log)).Log(logMessage...) + level.Info(util_log.WithContext(r.Context(), f.log)).Log(logMessage...) } func (f *Handler) parseRequestQueryString(r *http.Request, bodyBuf bytes.Buffer) url.Values { @@ -173,7 +180,7 @@ func (f *Handler) parseRequestQueryString(r *http.Request, bodyBuf bytes.Buffer) // Ensure the form has been parsed so all the parameters are present err := r.ParseForm() if err != nil { - level.Warn(util.WithContext(r.Context(), f.log)).Log("msg", "unable to parse request form", "err", err) + level.Warn(util_log.WithContext(r.Context(), f.log)).Log("msg", "unable to parse request form", "err", err) return nil } @@ -200,3 +207,17 @@ func writeError(w http.ResponseWriter, err error) { } server.WriteError(w, err) } + +func writeServiceTimingHeader(queryResponseTime time.Duration, headers http.Header, stats *querier_stats.Stats) { + if stats != nil { + parts := make([]string, 0) + parts = append(parts, statsValue("querier_wall_time", stats.LoadWallTime())) + parts = append(parts, statsValue("response_time", queryResponseTime)) + headers.Set(ServiceTimingHeaderName, strings.Join(parts, ", ")) + } +} + +func statsValue(name string, d time.Duration) string { + durationInMs := strconv.FormatFloat(float64(d)/float64(time.Millisecond), 'f', -1, 64) + return name + ";dur=" + durationInMs +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontend.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontend.go index 385ec26e9fa..42ed8be84c0 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontend.go +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontend.go @@ -20,6 +20,7 @@ import ( "github.com/cortexproject/cortex/pkg/scheduler/queue" "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/grpcutil" + "github.com/cortexproject/cortex/pkg/util/validation" ) var ( @@ -190,8 +191,9 @@ func (f *Frontend) Process(server frontendv1pb.Frontend_ProcessServer) error { errs := make(chan error, 1) go func() { err = server.Send(&frontendv1pb.FrontendToClient{ - Type: frontendv1pb.HTTP_REQUEST, - HttpRequest: req.request, + Type: frontendv1pb.HTTP_REQUEST, + HttpRequest: req.request, + StatsEnabled: stats.IsEnabled(req.originalCtx), }) if err != nil { errs <- err @@ -256,7 +258,7 @@ func getQuerierID(server frontendv1pb.Frontend_ProcessServer) (string, error) { } func (f *Frontend) queueRequest(ctx context.Context, req *request) error { - userID, err := tenant.TenantID(ctx) + tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { return err } @@ -264,9 +266,10 @@ func (f *Frontend) queueRequest(ctx context.Context, req *request) error { req.enqueueTime = time.Now() req.queueSpan, _ = 
opentracing.StartSpanFromContext(ctx, "queued") - maxQueriers := f.limits.MaxQueriersPerUser(userID) + // aggregate the max queriers limit in the case of a multi tenant query + maxQueriers := validation.SmallestPositiveNonZeroIntPerTenant(tenantIDs, f.limits.MaxQueriersPerUser) - err = f.requestQueue.EnqueueRequest(userID, req, maxQueriers, nil) + err = f.requestQueue.EnqueueRequest(tenant.JoinTenantIDs(tenantIDs), req, maxQueriers, nil) if err == queue.ErrTooManyRequests { return errTooManyRequest } diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.pb.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.pb.go index 2fae54a845a..ac747e5f8cd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.pb.go @@ -59,6 +59,9 @@ func (Type) EnumDescriptor() ([]byte, []int) { type FrontendToClient struct { HttpRequest *httpgrpc.HTTPRequest `protobuf:"bytes,1,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"` Type Type `protobuf:"varint,2,opt,name=type,proto3,enum=frontend.Type" json:"type,omitempty"` + // Whether query statistics tracking should be enabled. The response will include + // statistics only when this option is enabled. + StatsEnabled bool `protobuf:"varint,3,opt,name=statsEnabled,proto3" json:"statsEnabled,omitempty"` } func (m *FrontendToClient) Reset() { *m = FrontendToClient{} } @@ -107,6 +110,13 @@ func (m *FrontendToClient) GetType() Type { return HTTP_REQUEST } +func (m *FrontendToClient) GetStatsEnabled() bool { + if m != nil { + return m.StatsEnabled + } + return false +} + type ClientToFrontend struct { HttpResponse *httpgrpc.HTTPResponse `protobuf:"bytes,1,opt,name=httpResponse,proto3" json:"httpResponse,omitempty"` ClientID string `protobuf:"bytes,2,opt,name=clientID,proto3" json:"clientID,omitempty"` @@ -175,34 +185,35 @@ func init() { func init() { proto.RegisterFile("frontend.proto", fileDescriptor_eca3873955a29cfe) } var fileDescriptor_eca3873955a29cfe = []byte{ - // 419 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0x41, 0x6f, 0xd3, 0x30, - 0x14, 0xc7, 0x6d, 0x18, 0xa3, 0x78, 0x51, 0x15, 0x59, 0x02, 0x55, 0x39, 0x58, 0x53, 0xc4, 0xa1, - 0x42, 0x22, 0x81, 0x82, 0x84, 0x84, 0xc4, 0x65, 0xac, 0x8c, 0xdd, 0x46, 0x1a, 0x2e, 0x5c, 0xa6, - 0x25, 0x78, 0x59, 0x19, 0xcd, 0xf3, 0x6c, 0xa7, 0xa5, 0x37, 0x3e, 0x01, 0xe2, 0x63, 0xf0, 0x51, - 0x38, 0xf6, 0xd8, 0x23, 0x4d, 0x2f, 0x1c, 0xfb, 0x11, 0x50, 0xec, 0x34, 0x64, 0xbd, 0x58, 0xfe, - 0xeb, 0xff, 0xde, 0xfb, 0xbd, 0xbf, 0x4d, 0xba, 0x97, 0x12, 0x72, 0xcd, 0xf3, 0xcf, 0x81, 0x90, - 0xa0, 0x81, 0x76, 0xb6, 0xda, 0x7b, 0x9a, 0x8d, 0xf5, 0x55, 0x91, 0x04, 0x29, 0x4c, 0xc2, 0x0c, - 0x32, 0x08, 0x4d, 0x41, 0x52, 0x5c, 0x1a, 0x65, 0x84, 0xb9, 0xd9, 0x46, 0xef, 0x65, 0xab, 0x7c, - 0xc6, 0x2f, 0xa6, 0x7c, 0x06, 0xf2, 0x5a, 0x85, 0x29, 0x4c, 0x26, 0x90, 0x87, 0x57, 0x5a, 0x8b, - 0x4c, 0x8a, 0xb4, 0xb9, 0xd4, 0x5d, 0x6f, 0x5a, 0x5d, 0x29, 0x48, 0xcd, 0xbf, 0x09, 0x09, 0x5f, - 0x78, 0xaa, 0x6b, 0x15, 0x8a, 0xeb, 0x2c, 0xbc, 0x29, 0xb8, 0x1c, 0x73, 0x19, 0x2a, 0x7d, 0xa1, - 0x95, 0x3d, 0x6d, 0xbb, 0x0f, 0xc4, 0x7d, 0x57, 0xef, 0x1b, 0xc3, 0xdb, 0xaf, 0x63, 0x9e, 0x6b, - 0xfa, 0x8a, 0x1c, 0x54, 0x90, 0x88, 0xdf, 0x14, 0x5c, 0xe9, 0x1e, 0x3e, 0xc4, 0xfd, 0x83, 0xc1, - 0xc3, 0xa0, 0x01, 0xbf, 0x8f, 0xe3, 0xb3, 0xda, 0x8c, 0xda, 0x95, 0xd4, 0x27, 0x7b, 0x7a, 0x2e, - 0x78, 0xef, 0xce, 
0x21, 0xee, 0x77, 0x07, 0xdd, 0xa0, 0x79, 0x99, 0x78, 0x2e, 0x78, 0x64, 0x3c, - 0xff, 0x07, 0x26, 0xae, 0xe5, 0xc4, 0xb0, 0x25, 0xd3, 0xd7, 0xc4, 0xb1, 0x73, 0x94, 0x80, 0x5c, - 0xf1, 0x1a, 0xf9, 0x68, 0x17, 0x69, 0xdd, 0xe8, 0x56, 0x2d, 0xf5, 0x48, 0x27, 0x35, 0xf3, 0x4e, - 0x8f, 0x0d, 0xf8, 0x41, 0xd4, 0x68, 0xea, 0x93, 0x7b, 0x26, 0x6c, 0xef, 0xae, 0x19, 0xe8, 0x04, - 0x36, 0xfa, 0xa8, 0x3a, 0x23, 0x6b, 0x3d, 0x79, 0x4c, 0xf6, 0xaa, 0xf5, 0xa8, 0x4b, 0x9c, 0x8a, - 0x72, 0x1e, 0x0d, 0x3f, 0x7c, 0x1c, 0x8e, 0x62, 0x17, 0x51, 0x42, 0xf6, 0x4f, 0x86, 0xf1, 0xf9, - 0xe9, 0xb1, 0x8b, 0x07, 0x23, 0xd2, 0x69, 0xb6, 0x3d, 0x21, 0xf7, 0xcf, 0x24, 0xa4, 0x5c, 0x29, - 0xea, 0xfd, 0xcf, 0xb8, 0x1b, 0xca, 0x6b, 0x79, 0xbb, 0x4f, 0xec, 0xa3, 0x3e, 0x7e, 0x86, 0x8f, - 0x8e, 0x16, 0x2b, 0x86, 0x96, 0x2b, 0x86, 0x36, 0x2b, 0x86, 0xbf, 0x97, 0x0c, 0xff, 0x2a, 0x19, - 0xfe, 0x5d, 0x32, 0xbc, 0x28, 0x19, 0xfe, 0x53, 0x32, 0xfc, 0xb7, 0x64, 0x68, 0x53, 0x32, 0xfc, - 0x73, 0xcd, 0xd0, 0x62, 0xcd, 0xd0, 0x72, 0xcd, 0xd0, 0x27, 0x67, 0x3b, 0x76, 0xfa, 0x5c, 0x24, - 0xc9, 0xbe, 0xf9, 0xc7, 0x17, 0xff, 0x02, 0x00, 0x00, 0xff, 0xff, 0xe0, 0xa2, 0x48, 0x34, 0x87, - 0x02, 0x00, 0x00, + // 441 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0xc1, 0x6e, 0xd3, 0x30, + 0x18, 0xc7, 0xfd, 0xc1, 0x18, 0xc5, 0x8d, 0xaa, 0xc8, 0x12, 0xa8, 0xca, 0xc1, 0xaa, 0x22, 0x0e, + 0x15, 0x12, 0x09, 0x14, 0x24, 0x24, 0x24, 0x2e, 0x63, 0x65, 0xec, 0x36, 0xdc, 0x70, 0xe1, 0x32, + 0x35, 0x99, 0x97, 0x95, 0xad, 0xb1, 0x67, 0xbb, 0x1b, 0xbb, 0xf1, 0x04, 0x08, 0x89, 0x97, 0xe0, + 0x51, 0x38, 0xf6, 0xb8, 0x23, 0x4d, 0x2f, 0x1c, 0xf7, 0x08, 0xa8, 0x76, 0x9a, 0x65, 0xbd, 0x58, + 0xfe, 0xfb, 0xff, 0x7d, 0xfe, 0xff, 0xfc, 0x19, 0x77, 0x8e, 0x95, 0x28, 0x0c, 0x2f, 0x8e, 0x22, + 0xa9, 0x84, 0x11, 0xa4, 0xb5, 0xd6, 0xc1, 0xf3, 0x7c, 0x62, 0x4e, 0x66, 0x69, 0x94, 0x89, 0x69, + 0x9c, 0x8b, 0x5c, 0xc4, 0xb6, 0x20, 0x9d, 0x1d, 0x5b, 0x65, 0x85, 0xdd, 0xb9, 0xc6, 0xe0, 0x75, + 0xa3, 0xfc, 0x92, 0x8f, 0x2f, 0xf8, 0xa5, 0x50, 0xa7, 0x3a, 0xce, 0xc4, 0x74, 0x2a, 0x8a, 0xf8, + 0xc4, 0x18, 0x99, 0x2b, 0x99, 0xd5, 0x9b, 0xaa, 0xeb, 0x5d, 0xa3, 0x2b, 0x13, 0xca, 0xf0, 0x6f, + 0x52, 0x89, 0xaf, 0x3c, 0x33, 0x95, 0x8a, 0xe5, 0x69, 0x1e, 0x9f, 0xcf, 0xb8, 0x9a, 0x70, 0x15, + 0x6b, 0x33, 0x36, 0xda, 0xad, 0xae, 0x3d, 0xfc, 0x05, 0xd8, 0xff, 0x50, 0x01, 0x27, 0xe2, 0xfd, + 0xd9, 0x84, 0x17, 0x86, 0xbc, 0xc1, 0xed, 0x55, 0x0a, 0xe3, 0xe7, 0x33, 0xae, 0x4d, 0x17, 0x7a, + 0xd0, 0x6f, 0x0f, 0x1e, 0x47, 0x75, 0xf2, 0xc7, 0x24, 0x39, 0xa8, 0x4c, 0xd6, 0xac, 0x24, 0x21, + 0xde, 0x32, 0x57, 0x92, 0x77, 0xef, 0xf5, 0xa0, 0xdf, 0x19, 0x74, 0xa2, 0x7a, 0x34, 0xc9, 0x95, + 0xe4, 0xcc, 0x7a, 0x24, 0xc4, 0x9e, 0x05, 0x18, 0x16, 0xe3, 0xf4, 0x8c, 0x1f, 0x75, 0xef, 0xf7, + 0xa0, 0xdf, 0x62, 0x77, 0xce, 0xc2, 0x1f, 0x80, 0x7d, 0xc7, 0x92, 0x88, 0x35, 0x1d, 0x79, 0x8b, + 0x3d, 0x97, 0xa5, 0xa5, 0x28, 0x34, 0xaf, 0xb0, 0x9e, 0x6c, 0x62, 0x39, 0x97, 0xdd, 0xa9, 0x25, + 0x01, 0x6e, 0x65, 0xf6, 0xbe, 0xfd, 0x5d, 0x0b, 0xf7, 0x88, 0xd5, 0x9a, 0x84, 0xf8, 0x81, 0x0d, + 0xb7, 0x24, 0xed, 0x81, 0x17, 0xb9, 0xf9, 0x8c, 0x56, 0x2b, 0x73, 0xd6, 0xb3, 0xa7, 0x78, 0x6b, + 0xf5, 0x04, 0xe2, 0x63, 0x6f, 0x95, 0x72, 0xc8, 0x86, 0x9f, 0x3e, 0x0f, 0x47, 0x89, 0x8f, 0x08, + 0xc6, 0xdb, 0x7b, 0xc3, 0xe4, 0x70, 0x7f, 0xd7, 0x87, 0xc1, 0x08, 0xb7, 0x6a, 0xda, 0x3d, 0xfc, + 0xf0, 0x40, 0x89, 0x8c, 0x6b, 0x4d, 0x82, 0xdb, 0x39, 0x6c, 0x3e, 0x2a, 0x68, 0x78, 0x9b, 0xdf, + 0x10, 0xa2, 0x3e, 0xbc, 0x80, 0x9d, 0x9d, 0xf9, 0x82, 0xa2, 0xeb, 0x05, 
0x45, 0x37, 0x0b, 0x0a, + 0xdf, 0x4b, 0x0a, 0xbf, 0x4b, 0x0a, 0x7f, 0x4a, 0x0a, 0xf3, 0x92, 0xc2, 0xdf, 0x92, 0xc2, 0xbf, + 0x92, 0xa2, 0x9b, 0x92, 0xc2, 0xcf, 0x25, 0x45, 0xf3, 0x25, 0x45, 0xd7, 0x4b, 0x8a, 0xbe, 0x78, + 0xeb, 0x6b, 0x2f, 0x5e, 0xca, 0x34, 0xdd, 0xb6, 0x9f, 0xfd, 0xea, 0x7f, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x97, 0x76, 0xa9, 0x36, 0xac, 0x02, 0x00, 0x00, } func (x Type) String() string { @@ -237,6 +248,9 @@ func (this *FrontendToClient) Equal(that interface{}) bool { if this.Type != that1.Type { return false } + if this.StatsEnabled != that1.StatsEnabled { + return false + } return true } func (this *ClientToFrontend) Equal(that interface{}) bool { @@ -273,12 +287,13 @@ func (this *FrontendToClient) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&frontendv1pb.FrontendToClient{") if this.HttpRequest != nil { s = append(s, "HttpRequest: "+fmt.Sprintf("%#v", this.HttpRequest)+",\n") } s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") + s = append(s, "StatsEnabled: "+fmt.Sprintf("%#v", this.StatsEnabled)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -443,6 +458,16 @@ func (m *FrontendToClient) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.StatsEnabled { + i-- + if m.StatsEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } if m.Type != 0 { i = encodeVarintFrontend(dAtA, i, uint64(m.Type)) i-- @@ -541,6 +566,9 @@ func (m *FrontendToClient) Size() (n int) { if m.Type != 0 { n += 1 + sovFrontend(uint64(m.Type)) } + if m.StatsEnabled { + n += 2 + } return n } @@ -578,6 +606,7 @@ func (this *FrontendToClient) String() string { s := strings.Join([]string{`&FrontendToClient{`, `HttpRequest:` + strings.Replace(fmt.Sprintf("%v", this.HttpRequest), "HTTPRequest", "httpgrpc.HTTPRequest", 1) + `,`, `Type:` + fmt.Sprintf("%v", this.Type) + `,`, + `StatsEnabled:` + fmt.Sprintf("%v", this.StatsEnabled) + `,`, `}`, }, "") return s @@ -686,6 +715,26 @@ func (m *FrontendToClient) Unmarshal(dAtA []byte) error { break } } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StatsEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFrontend + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.StatsEnabled = bool(v != 0) default: iNdEx = preIndex skippy, err := skipFrontend(dAtA[iNdEx:]) diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.proto b/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.proto index c801993578a..0aed412f2e4 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.proto +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/v1/frontendv1pb/frontend.proto @@ -27,6 +27,10 @@ enum Type { message FrontendToClient { httpgrpc.HTTPRequest httpRequest = 1; Type type = 2; + + // Whether query statistics tracking should be enabled. The response will include + // statistics only when this option is enabled. 
+ bool statsEnabled = 3; } message ClientToFrontend { diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend.go index da5942883c6..268eab74267 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend.go +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend.go @@ -29,10 +29,10 @@ import ( // Config for a Frontend. type Config struct { - SchedulerAddress string `yaml:"scheduler_address"` - DNSLookupPeriod time.Duration `yaml:"scheduler_dns_lookup_period"` - WorkerConcurrency int `yaml:"scheduler_worker_concurrency"` - GRPCClientConfig grpcclient.ConfigWithTLS `yaml:"grpc_client_config"` + SchedulerAddress string `yaml:"scheduler_address"` + DNSLookupPeriod time.Duration `yaml:"scheduler_dns_lookup_period"` + WorkerConcurrency int `yaml:"scheduler_worker_concurrency"` + GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"` // Used to find local IP address, that is sent to scheduler and querier-worker. InfNames []string `yaml:"instance_interface_names"` @@ -73,9 +73,10 @@ type Frontend struct { } type frontendRequest struct { - queryID uint64 - request *httpgrpc.HTTPRequest - userID string + queryID uint64 + request *httpgrpc.HTTPRequest + userID string + statsEnabled bool cancel context.CancelFunc @@ -152,10 +153,11 @@ func (f *Frontend) RoundTripGRPC(ctx context.Context, req *httpgrpc.HTTPRequest) return nil, fmt.Errorf("frontend not running: %v", s) } - userID, err := tenant.TenantID(ctx) + tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { return nil, err } + userID := tenant.JoinTenantIDs(tenantIDs) // Propagate trace context in gRPC too - this will be ignored if using HTTP. tracer, span := opentracing.GlobalTracer(), opentracing.SpanFromContext(ctx) @@ -170,9 +172,10 @@ func (f *Frontend) RoundTripGRPC(ctx context.Context, req *httpgrpc.HTTPRequest) defer cancel() freq := &frontendRequest{ - queryID: f.lastQueryID.Inc(), - request: req, - userID: userID, + queryID: f.lastQueryID.Inc(), + request: req, + userID: userID, + statsEnabled: stats.IsEnabled(ctx), cancel: cancel, @@ -239,10 +242,11 @@ enqueueAgain: } func (f *Frontend) QueryResult(ctx context.Context, qrReq *frontendv2pb.QueryResultRequest) (*frontendv2pb.QueryResultResponse, error) { - userID, err := tenant.TenantID(ctx) + tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { return nil, err } + userID := tenant.JoinTenantIDs(tenantIDs) req := f.requests.get(qrReq.QueryID) // It is possible that some old response belonging to different user was received, if frontend has restarted. 
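The recurring change across these frontend files is the move from tenant.TenantID to tenant.TenantIDs plus tenant.JoinTenantIDs, so a single query may now span several tenants. Per-tenant limits then have to be aggregated: validation.SmallestPositiveNonZeroIntPerTenant picks, as its name suggests, the smallest positive non-zero value any tenant returns, treating zero as "unlimited". A minimal sketch of that aggregation, assuming only a limits callback shaped like f.limits.MaxQueriersPerUser (the tenant names and values below are invented for illustration):

package main

import "fmt"

// smallestPositiveNonZeroIntPerTenant applies f to every tenant ID and
// keeps the smallest positive result; 0 means no tenant imposed a limit.
func smallestPositiveNonZeroIntPerTenant(tenantIDs []string, f func(string) int) int {
	result := 0
	for _, id := range tenantIDs {
		if v := f(id); v > 0 && (result == 0 || v < result) {
			result = v
		}
	}
	return result
}

func main() {
	limits := map[string]int{"team-a": 10, "team-b": 0, "team-c": 4}
	maxQueriers := smallestPositiveNonZeroIntPerTenant(
		[]string{"team-a", "team-b", "team-c"},
		func(id string) int { return limits[id] },
	)
	fmt.Println(maxQueriers) // 4: team-b's 0 (unlimited) never wins over a real limit
}

Taking the smallest concrete limit is the conservative choice: the multi-tenant query is enqueued once under the joined tenant ID, so it has to respect the tightest constraint any of its tenants carries.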
diff --git a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend_scheduler_worker.go b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend_scheduler_worker.go index 577a0d27abf..1395b9a0bb1 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend_scheduler_worker.go +++ b/vendor/github.com/cortexproject/cortex/pkg/frontend/v2/frontend_scheduler_worker.go @@ -261,6 +261,7 @@ func (w *frontendSchedulerWorker) schedulerLoop(loop schedulerpb.SchedulerForFro UserID: req.userID, HttpRequest: req.request, FrontendAddress: w.frontendAddr, + StatsEnabled: req.statsEnabled, }) if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go index 7ae169785fc..6079d3b7161 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/client.go @@ -55,7 +55,7 @@ func (c *closableHealthAndIngesterClient) Close() error { // Config is the configuration struct for the ingester client type Config struct { - GRPCClientConfig grpcclient.ConfigWithTLS `yaml:"grpc_client_config"` + GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"` } // RegisterFlags registers configuration settings used by the ingester client config. diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go index ca6b265ff7e..a77f8b24a2d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.pb.go @@ -120,9 +120,10 @@ func (MetricMetadata_MetricType) EnumDescriptor() ([]byte, []int) { } type WriteRequest struct { - Timeseries []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"` - Source WriteRequest_SourceEnum `protobuf:"varint,2,opt,name=Source,proto3,enum=cortex.WriteRequest_SourceEnum" json:"Source,omitempty"` - Metadata []*MetricMetadata `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty"` + Timeseries []PreallocTimeseries `protobuf:"bytes,1,rep,name=timeseries,proto3,customtype=PreallocTimeseries" json:"timeseries"` + Source WriteRequest_SourceEnum `protobuf:"varint,2,opt,name=Source,proto3,enum=cortex.WriteRequest_SourceEnum" json:"Source,omitempty"` + Metadata []*MetricMetadata `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty"` + SkipLabelNameValidation bool `protobuf:"varint,1000,opt,name=skip_label_name_validation,json=skipLabelNameValidation,proto3" json:"skip_label_name_validation,omitempty"` } func (m *WriteRequest) Reset() { *m = WriteRequest{} } @@ -171,6 +172,13 @@ func (m *WriteRequest) GetMetadata() []*MetricMetadata { return nil } +func (m *WriteRequest) GetSkipLabelNameValidation() bool { + if m != nil { + return m.SkipLabelNameValidation + } + return false +} + type WriteResponse struct { } @@ -1638,100 +1646,103 @@ func init() { func init() { proto.RegisterFile("cortex.proto", fileDescriptor_893a47d0a749d749) } var fileDescriptor_893a47d0a749d749 = []byte{ - // 1478 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xcf, 0x6f, 0x1b, 0x45, - 0x14, 0xde, 0xf1, 0xaf, 0xc4, 0xcf, 0x8e, 0xb3, 0x99, 0xa4, 0xad, 0xeb, 0x8a, 0x75, 0x3b, 0x52, - 0x4b, 0x04, 0x34, 0x2d, 0x41, 0x85, 0x1c, 0x40, 0x95, 0xd3, 0x3a, 0xa9, 0x21, 0x76, 
0xd2, 0xb1, - 0x43, 0x01, 0x09, 0x59, 0x1b, 0x7b, 0x92, 0xac, 0xba, 0xbb, 0x76, 0xf7, 0x07, 0x22, 0x07, 0x24, - 0x24, 0x8e, 0x1c, 0xe8, 0xb1, 0x7f, 0x02, 0x67, 0x2e, 0xdc, 0x39, 0xf5, 0xd8, 0x63, 0xc5, 0xa1, - 0xa2, 0xee, 0x85, 0x1b, 0x15, 0x7f, 0x01, 0xda, 0x99, 0xd9, 0xf5, 0xae, 0x6b, 0x43, 0x0b, 0xf4, - 0xe6, 0x79, 0xef, 0x9b, 0x6f, 0xde, 0x7e, 0xf3, 0xe6, 0xbd, 0x67, 0x28, 0xf6, 0x06, 0x8e, 0xc7, - 0xbe, 0x5e, 0x1b, 0x3a, 0x03, 0x6f, 0x80, 0x73, 0x62, 0x55, 0xb9, 0x7c, 0x64, 0x78, 0xc7, 0xfe, - 0xc1, 0x5a, 0x6f, 0x60, 0x5d, 0x39, 0x1a, 0x1c, 0x0d, 0xae, 0x70, 0xf7, 0x81, 0x7f, 0xc8, 0x57, - 0x7c, 0xc1, 0x7f, 0x89, 0x6d, 0xe4, 0x4f, 0x04, 0xc5, 0x3b, 0x8e, 0xe1, 0x31, 0xca, 0xee, 0xf9, - 0xcc, 0xf5, 0x70, 0x0b, 0xc0, 0x33, 0x2c, 0xe6, 0x32, 0xc7, 0x60, 0x6e, 0x19, 0x9d, 0x4f, 0xaf, - 0x16, 0xd6, 0xf1, 0x9a, 0x3c, 0xaa, 0x63, 0x58, 0xac, 0xcd, 0x3d, 0x9b, 0x95, 0x87, 0x4f, 0xaa, - 0xca, 0xaf, 0x4f, 0xaa, 0x78, 0xcf, 0x61, 0xba, 0x69, 0x0e, 0x7a, 0x9d, 0x68, 0x17, 0x8d, 0x31, - 0xe0, 0x0f, 0x20, 0xd7, 0x1e, 0xf8, 0x4e, 0x8f, 0x95, 0x53, 0xe7, 0xd1, 0x6a, 0x69, 0xbd, 0x1a, - 0x72, 0xc5, 0x4f, 0x5d, 0x13, 0x90, 0xba, 0xed, 0x5b, 0x54, 0xc2, 0xf1, 0x06, 0xcc, 0x5b, 0xcc, - 0xd3, 0xfb, 0xba, 0xa7, 0x97, 0xd3, 0x3c, 0x8c, 0xd3, 0xe1, 0xd6, 0x26, 0xf3, 0x1c, 0xa3, 0xd7, - 0x94, 0xde, 0xcd, 0xcc, 0xc3, 0x27, 0x55, 0x44, 0x23, 0x34, 0xa9, 0x02, 0x8c, 0xf9, 0xf0, 0x1c, - 0xa4, 0x6b, 0x7b, 0x0d, 0x55, 0xc1, 0xf3, 0x90, 0xa1, 0xfb, 0x3b, 0x75, 0x15, 0x91, 0x45, 0x58, - 0x90, 0xa7, 0xbb, 0xc3, 0x81, 0xed, 0x32, 0xf2, 0x11, 0x14, 0x28, 0xd3, 0xfb, 0xa1, 0x06, 0x6b, - 0x30, 0x77, 0xcf, 0x8f, 0x0b, 0xb0, 0x12, 0x9e, 0x7c, 0xdb, 0x67, 0xce, 0x89, 0x84, 0xd1, 0x10, - 0x44, 0xae, 0x43, 0x51, 0x6c, 0x17, 0x74, 0xf8, 0x0a, 0xcc, 0x39, 0xcc, 0xf5, 0x4d, 0x2f, 0xdc, - 0x7f, 0x6a, 0x62, 0xbf, 0xc0, 0xd1, 0x10, 0x45, 0x1e, 0x20, 0x28, 0xc6, 0xa9, 0xf1, 0x3b, 0x80, - 0x5d, 0x4f, 0x77, 0xbc, 0x2e, 0x57, 0xd2, 0xd3, 0xad, 0x61, 0xd7, 0x0a, 0xc8, 0xd0, 0x6a, 0x9a, - 0xaa, 0xdc, 0xd3, 0x09, 0x1d, 0x4d, 0x17, 0xaf, 0x82, 0xca, 0xec, 0x7e, 0x12, 0x9b, 0xe2, 0xd8, - 0x12, 0xb3, 0xfb, 0x71, 0xe4, 0x55, 0x98, 0xb7, 0x74, 0xaf, 0x77, 0xcc, 0x1c, 0x57, 0x8a, 0x1a, - 0x7d, 0xda, 0x8e, 0x7e, 0xc0, 0xcc, 0xa6, 0x70, 0xd2, 0x08, 0x45, 0x1a, 0xb0, 0x90, 0x08, 0x1a, - 0x6f, 0xbc, 0x64, 0x82, 0x04, 0xb7, 0xa2, 0xc4, 0x53, 0x81, 0xdc, 0x47, 0xb0, 0xcc, 0xb9, 0xda, - 0x9e, 0xc3, 0x74, 0x2b, 0x62, 0xbc, 0x0e, 0x85, 0xde, 0xb1, 0x6f, 0xdf, 0x4d, 0x50, 0x9e, 0x79, - 0x91, 0xf2, 0x46, 0x00, 0x92, 0xbc, 0xf1, 0x1d, 0x13, 0x21, 0xa5, 0x5e, 0x21, 0xa4, 0xef, 0x11, - 0x60, 0xfe, 0xe1, 0x9f, 0xea, 0xa6, 0xcf, 0xdc, 0x50, 0xfe, 0x37, 0x00, 0xcc, 0xc0, 0xda, 0xb5, - 0x75, 0x8b, 0x71, 0xd9, 0xf3, 0x34, 0xcf, 0x2d, 0x2d, 0xdd, 0x62, 0x33, 0x6e, 0x27, 0xf5, 0x0a, - 0xb7, 0x93, 0x9e, 0x76, 0x3b, 0x64, 0x03, 0x96, 0x13, 0xc1, 0x48, 0x7d, 0x2e, 0x40, 0x51, 0x44, - 0xf3, 0x15, 0xb7, 0x73, 0x81, 0xf2, 0xb4, 0x60, 0x8e, 0xa1, 0xe4, 0x2e, 0x2c, 0xed, 0x84, 0xe1, - 0xb9, 0xaf, 0x39, 0x89, 0xc8, 0x35, 0xa9, 0x99, 0x3c, 0x4c, 0x46, 0x59, 0x85, 0xc2, 0x58, 0xb3, - 0x30, 0x48, 0x88, 0x44, 0x73, 0x09, 0x06, 0x75, 0xdf, 0x65, 0x4e, 0xdb, 0xd3, 0xbd, 0x30, 0x44, - 0xf2, 0x33, 0x82, 0xa5, 0x98, 0x51, 0x52, 0x5d, 0x84, 0x92, 0x61, 0x1f, 0x31, 0xd7, 0x33, 0x06, - 0x76, 0xd7, 0xd1, 0x3d, 0x71, 0x05, 0x88, 0x2e, 0x44, 0x56, 0xaa, 0x7b, 0x2c, 0xb8, 0x25, 0xdb, - 0xb7, 0xba, 0xd1, 0xb5, 0xa3, 0xd5, 0x0c, 0xcd, 0xdb, 0xbe, 0x25, 0x6e, 0x3b, 0xf8, 0x7c, 0x7d, - 0x68, 0x74, 0x27, 0x98, 0xd2, 0x9c, 0x49, 0xd5, 0x87, 0x46, 0x23, 0x41, 0xb6, 0x06, 0xcb, 0x8e, - 0x6f, 0xb2, 
0x49, 0x78, 0x86, 0xc3, 0x97, 0x02, 0x57, 0x02, 0x4f, 0xbe, 0x84, 0xe5, 0x20, 0xf0, - 0xc6, 0xcd, 0x64, 0xe8, 0x67, 0x60, 0xce, 0x77, 0x99, 0xd3, 0x35, 0xfa, 0x32, 0x6d, 0x72, 0xc1, - 0xb2, 0xd1, 0xc7, 0x97, 0x21, 0xc3, 0x4b, 0x59, 0x10, 0x66, 0x61, 0xfd, 0x6c, 0x98, 0x9d, 0x2f, - 0x7c, 0x3c, 0xe5, 0x30, 0xb2, 0x0d, 0x38, 0x70, 0xb9, 0x49, 0xf6, 0x77, 0x21, 0xeb, 0x06, 0x06, - 0xf9, 0x46, 0xce, 0xc5, 0x59, 0x26, 0x22, 0xa1, 0x02, 0x49, 0x7e, 0x42, 0xa0, 0x89, 0x7a, 0xe9, - 0x6e, 0x0d, 0x9c, 0xf8, 0x23, 0x7f, 0xdd, 0x79, 0x82, 0x37, 0xa0, 0x18, 0x96, 0x91, 0xae, 0xcb, - 0x3c, 0x59, 0x70, 0x4e, 0x4d, 0x2b, 0x38, 0x2e, 0x2d, 0x84, 0xd0, 0x36, 0xf3, 0x48, 0x03, 0xaa, - 0x33, 0x63, 0x96, 0x52, 0x5c, 0x82, 0x9c, 0xc5, 0x21, 0x52, 0x8b, 0x52, 0xb2, 0x39, 0x50, 0xe9, - 0x25, 0x65, 0x38, 0x2d, 0xa9, 0xc2, 0x7e, 0x11, 0xe6, 0x5e, 0x13, 0xce, 0xbc, 0xe0, 0x91, 0xe4, - 0xeb, 0xb1, 0xde, 0x83, 0xfe, 0xae, 0xf7, 0xc4, 0xba, 0xce, 0x2f, 0x08, 0x16, 0x27, 0x6a, 0x55, - 0xa0, 0xd5, 0xa1, 0x33, 0xb0, 0x64, 0x52, 0xc5, 0xd3, 0xa2, 0x14, 0xd8, 0x1b, 0xd2, 0xdc, 0xe8, - 0xc7, 0xf3, 0x26, 0x95, 0xc8, 0x9b, 0xeb, 0x90, 0xe3, 0x6f, 0x28, 0xac, 0xd7, 0x4b, 0x09, 0xf9, - 0xf6, 0x74, 0xc3, 0xd9, 0x5c, 0x91, 0xad, 0xb8, 0xc8, 0x4d, 0xb5, 0xbe, 0x3e, 0xf4, 0x98, 0x43, - 0xe5, 0x36, 0xfc, 0x36, 0xe4, 0x44, 0xad, 0x2c, 0x67, 0x38, 0xc1, 0x42, 0x48, 0x10, 0x2f, 0xa7, - 0x12, 0x42, 0x7e, 0x40, 0x90, 0x15, 0xa1, 0xbf, 0xae, 0xa4, 0xa8, 0xc0, 0x3c, 0xb3, 0x7b, 0x83, - 0xbe, 0x61, 0x1f, 0xf1, 0xb7, 0x98, 0xa5, 0xd1, 0x1a, 0x63, 0xf9, 0x46, 0x82, 0x47, 0x57, 0x94, - 0x0f, 0xa1, 0x0c, 0xa7, 0x3b, 0x8e, 0x6e, 0xbb, 0x87, 0xcc, 0xe1, 0x81, 0x45, 0x19, 0x40, 0xbe, - 0x01, 0x18, 0xeb, 0x1d, 0xd3, 0x09, 0xfd, 0x3b, 0x9d, 0xd6, 0x60, 0xce, 0xd5, 0xad, 0xa1, 0x19, - 0x75, 0x90, 0x28, 0xa3, 0xda, 0xdc, 0x2c, 0x95, 0x0a, 0x41, 0xe4, 0x1a, 0xe4, 0x23, 0xea, 0x20, - 0xf2, 0xa8, 0x55, 0x14, 0x29, 0xff, 0x8d, 0x57, 0x20, 0xcb, 0x0b, 0x36, 0x17, 0xa2, 0x48, 0xc5, - 0x82, 0xd4, 0x20, 0x27, 0xf8, 0xc6, 0x7e, 0x51, 0xdc, 0xc4, 0x22, 0x28, 0xf6, 0x53, 0x54, 0x2c, - 0x78, 0xb1, 0xfa, 0x5b, 0x83, 0x85, 0xc4, 0x9b, 0x48, 0x74, 0x75, 0xf4, 0x52, 0x5d, 0xfd, 0x41, - 0x0a, 0x4a, 0xc9, 0x4c, 0xc6, 0xd7, 0x20, 0xe3, 0x9d, 0x0c, 0x45, 0x34, 0xa5, 0xf5, 0x0b, 0xd3, - 0xf3, 0x5d, 0x2e, 0x3b, 0x27, 0x43, 0x46, 0x39, 0x3c, 0xc8, 0x13, 0xf1, 0xd2, 0xba, 0x87, 0xba, - 0x65, 0x98, 0x27, 0xa2, 0x65, 0x8a, 0x1c, 0x56, 0x85, 0x67, 0x8b, 0x3b, 0x78, 0xe7, 0xc4, 0x90, - 0x39, 0x66, 0xe6, 0x90, 0xdf, 0x70, 0x9e, 0xf2, 0xdf, 0x81, 0xcd, 0xb7, 0x0d, 0xaf, 0x9c, 0x15, - 0xb6, 0xe0, 0x37, 0x39, 0x01, 0x18, 0x9f, 0x84, 0x0b, 0x30, 0xb7, 0xdf, 0xfa, 0xa4, 0xb5, 0x7b, - 0xa7, 0xa5, 0x2a, 0xc1, 0xe2, 0xc6, 0xee, 0x7e, 0xab, 0x53, 0xa7, 0x2a, 0xc2, 0x79, 0xc8, 0x6e, - 0xd7, 0xf6, 0xb7, 0xeb, 0x6a, 0x0a, 0x2f, 0x40, 0xfe, 0x56, 0xa3, 0xdd, 0xd9, 0xdd, 0xa6, 0xb5, - 0xa6, 0x9a, 0xc6, 0x18, 0x4a, 0xdc, 0x33, 0xb6, 0x65, 0x82, 0xad, 0xed, 0xfd, 0x66, 0xb3, 0x46, - 0x3f, 0x57, 0xb3, 0xc1, 0x38, 0xd8, 0x68, 0x6d, 0xed, 0xaa, 0x39, 0x5c, 0x84, 0xf9, 0x76, 0xa7, - 0xd6, 0xa9, 0xb7, 0xeb, 0x1d, 0x75, 0x8e, 0x34, 0x20, 0x27, 0x8e, 0xfe, 0xcf, 0x29, 0x45, 0xba, - 0x50, 0x8c, 0xeb, 0x8f, 0x2f, 0x26, 0x24, 0x8e, 0xe8, 0xb8, 0x3b, 0x26, 0x69, 0x98, 0x4c, 0x42, - 0xc4, 0x89, 0x64, 0x4a, 0x73, 0xa3, 0x4c, 0xa6, 0xef, 0x10, 0x94, 0xc6, 0x6f, 0x60, 0xcb, 0x30, - 0xd9, 0xff, 0x51, 0x72, 0x2a, 0x30, 0x7f, 0x68, 0x98, 0x8c, 0xc7, 0x20, 0x8e, 0x8b, 0xd6, 0xd3, - 0x9e, 0xe8, 0x5b, 0x1f, 0x43, 0x3e, 0xfa, 0x84, 0xe0, 0x46, 0xea, 0xb7, 0xf7, 0x6b, 0x3b, 0xaa, - 0x12, 0xdc, 0x48, 0x6b, 0xb7, 0xd3, 
0x15, 0x4b, 0x84, 0x17, 0xa1, 0x40, 0xeb, 0xdb, 0xf5, 0xcf, - 0xba, 0xcd, 0x5a, 0xe7, 0xc6, 0x2d, 0x35, 0x15, 0x5c, 0x91, 0x30, 0xb4, 0x76, 0xa5, 0x2d, 0xbd, - 0xfe, 0x47, 0x16, 0xe6, 0xc3, 0x18, 0x83, 0x94, 0xdc, 0xf3, 0xdd, 0x63, 0xbc, 0x32, 0xed, 0x3f, - 0x43, 0xe5, 0xd4, 0x84, 0x55, 0x96, 0x05, 0x05, 0xbf, 0x0f, 0x59, 0x3e, 0x66, 0xe2, 0xa9, 0x63, - 0x7b, 0x65, 0xfa, 0x30, 0x4e, 0x14, 0x7c, 0x13, 0x0a, 0xb1, 0xf1, 0x74, 0xc6, 0xee, 0x73, 0x09, - 0x6b, 0x72, 0x92, 0x25, 0xca, 0x55, 0x84, 0x6f, 0x41, 0x21, 0x36, 0xc4, 0xe1, 0x4a, 0x22, 0x69, - 0x12, 0x63, 0xe6, 0x98, 0x6b, 0xca, 0xd4, 0x47, 0x14, 0x5c, 0x07, 0x18, 0xcf, 0x59, 0xf8, 0x6c, - 0x02, 0x1c, 0x1f, 0xf4, 0x2a, 0x95, 0x69, 0xae, 0x88, 0x66, 0x13, 0xf2, 0xd1, 0x94, 0x81, 0xcb, - 0x53, 0x06, 0x0f, 0x41, 0x32, 0x7b, 0x24, 0x21, 0x0a, 0xde, 0x82, 0x62, 0xcd, 0x34, 0x5f, 0x86, - 0xa6, 0x12, 0xf7, 0xb8, 0x93, 0x3c, 0x66, 0xd4, 0x73, 0x27, 0x1b, 0x3b, 0xbe, 0x94, 0xac, 0x38, - 0xb3, 0xa6, 0x95, 0xca, 0x9b, 0xff, 0x88, 0x8b, 0x4e, 0xeb, 0xc0, 0xe2, 0x44, 0x87, 0xc7, 0xda, - 0xc4, 0xee, 0x89, 0xa1, 0xa0, 0x52, 0x9d, 0xe9, 0x8f, 0x58, 0x9b, 0x50, 0x4a, 0x76, 0x24, 0x3c, - 0xeb, 0xbf, 0x4a, 0x25, 0x3a, 0x6d, 0x46, 0x0b, 0x53, 0x56, 0xd1, 0xe6, 0x87, 0x8f, 0x9e, 0x6a, - 0xca, 0xe3, 0xa7, 0x9a, 0xf2, 0xfc, 0xa9, 0x86, 0xbe, 0x1d, 0x69, 0xe8, 0xc7, 0x91, 0x86, 0x1e, - 0x8e, 0x34, 0xf4, 0x68, 0xa4, 0xa1, 0xdf, 0x46, 0x1a, 0xfa, 0x7d, 0xa4, 0x29, 0xcf, 0x47, 0x1a, - 0xba, 0xff, 0x4c, 0x53, 0x1e, 0x3d, 0xd3, 0x94, 0xc7, 0xcf, 0x34, 0xe5, 0x8b, 0x5c, 0xcf, 0x34, - 0x98, 0xed, 0x1d, 0xe4, 0xf8, 0xdf, 0xf8, 0xf7, 0xfe, 0x0a, 0x00, 0x00, 0xff, 0xff, 0x0f, 0x73, - 0x78, 0x05, 0x0d, 0x10, 0x00, 0x00, + // 1526 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x58, 0xcf, 0x6f, 0x13, 0xc7, + 0x17, 0xdf, 0xf5, 0x6f, 0x3f, 0x3b, 0xce, 0x66, 0x12, 0x88, 0x31, 0xfa, 0xda, 0x30, 0x12, 0x7c, + 0xa3, 0xb6, 0x04, 0x9a, 0x8a, 0x36, 0x07, 0x2a, 0xe4, 0x80, 0x13, 0xdc, 0x62, 0x27, 0x8c, 0x1d, + 0x68, 0x2b, 0x55, 0xd6, 0xc6, 0x9e, 0x24, 0x2b, 0x76, 0xd7, 0x66, 0x7f, 0xa0, 0xe6, 0x50, 0xa9, + 0x52, 0x8f, 0x3d, 0x94, 0x23, 0x7f, 0x42, 0x8f, 0x55, 0x2f, 0xbd, 0xf7, 0xc4, 0x91, 0x23, 0xea, + 0x01, 0x95, 0x70, 0xe1, 0x56, 0xfe, 0x84, 0x6a, 0x7e, 0xec, 0x7a, 0xd7, 0xd8, 0x2d, 0x94, 0x72, + 0xf3, 0xbc, 0xf7, 0x99, 0xcf, 0xbc, 0x7d, 0xf3, 0x99, 0xf7, 0x5e, 0x02, 0xc5, 0xfe, 0xd0, 0xf1, + 0xe8, 0x37, 0xab, 0x23, 0x67, 0xe8, 0x0d, 0x51, 0x46, 0xac, 0x2a, 0x17, 0x0e, 0x0c, 0xef, 0xd0, + 0xdf, 0x5b, 0xed, 0x0f, 0xad, 0x8b, 0x07, 0xc3, 0x83, 0xe1, 0x45, 0xee, 0xde, 0xf3, 0xf7, 0xf9, + 0x8a, 0x2f, 0xf8, 0x2f, 0xb1, 0x0d, 0xff, 0x9c, 0x80, 0xe2, 0x1d, 0xc7, 0xf0, 0x28, 0xa1, 0xf7, + 0x7c, 0xea, 0x7a, 0xa8, 0x0d, 0xe0, 0x19, 0x16, 0x75, 0xa9, 0x63, 0x50, 0xb7, 0xac, 0x9e, 0x49, + 0xae, 0x14, 0xd6, 0xd0, 0xaa, 0x3c, 0xaa, 0x6b, 0x58, 0xb4, 0xc3, 0x3d, 0x1b, 0x95, 0x47, 0x4f, + 0x6b, 0xca, 0xef, 0x4f, 0x6b, 0x68, 0xc7, 0xa1, 0xba, 0x69, 0x0e, 0xfb, 0xdd, 0x70, 0x17, 0x89, + 0x30, 0xa0, 0x4f, 0x20, 0xd3, 0x19, 0xfa, 0x4e, 0x9f, 0x96, 0x13, 0x67, 0xd4, 0x95, 0xd2, 0x5a, + 0x2d, 0xe0, 0x8a, 0x9e, 0xba, 0x2a, 0x20, 0x0d, 0xdb, 0xb7, 0x88, 0x84, 0xa3, 0x75, 0xc8, 0x59, + 0xd4, 0xd3, 0x07, 0xba, 0xa7, 0x97, 0x93, 0x3c, 0x8c, 0x93, 0xc1, 0xd6, 0x16, 0xf5, 0x1c, 0xa3, + 0xdf, 0x92, 0xde, 0x8d, 0xd4, 0xa3, 0xa7, 0x35, 0x95, 0x84, 0x68, 0x74, 0x05, 0x2a, 0xee, 0x5d, + 0x63, 0xd4, 0x33, 0xf5, 0x3d, 0x6a, 0xf6, 0x6c, 0xdd, 0xa2, 0xbd, 0xfb, 0xba, 0x69, 0x0c, 0x74, + 0xcf, 0x18, 0xda, 0xe5, 0x17, 0xd9, 0x33, 0xea, 0x4a, 0x8e, 0x2c, 0x33, 
0xc8, 0x4d, 0x86, 0x68, + 0xeb, 0x16, 0xbd, 0x1d, 0xfa, 0x71, 0x0d, 0x60, 0x1c, 0x0d, 0xca, 0x42, 0xb2, 0xbe, 0xd3, 0xd4, + 0x14, 0x94, 0x83, 0x14, 0xd9, 0xbd, 0xd9, 0xd0, 0x54, 0x3c, 0x0f, 0x73, 0x32, 0x76, 0x77, 0x34, + 0xb4, 0x5d, 0x8a, 0x3f, 0x85, 0x02, 0xa1, 0xfa, 0x20, 0xc8, 0xe0, 0x2a, 0x64, 0xef, 0xf9, 0xd1, + 0xf4, 0x2d, 0x05, 0x71, 0xdf, 0xf2, 0xa9, 0x73, 0x24, 0x61, 0x24, 0x00, 0xe1, 0xab, 0x50, 0x14, + 0xdb, 0x05, 0x1d, 0xba, 0x08, 0x59, 0x87, 0xba, 0xbe, 0xe9, 0x05, 0xfb, 0x4f, 0x4c, 0xec, 0x17, + 0x38, 0x12, 0xa0, 0xf0, 0x43, 0x15, 0x8a, 0x51, 0x6a, 0xf4, 0x01, 0x20, 0xd7, 0xd3, 0x1d, 0xaf, + 0xc7, 0xef, 0xc1, 0xd3, 0xad, 0x51, 0xcf, 0x62, 0x64, 0xea, 0x4a, 0x92, 0x68, 0xdc, 0xd3, 0x0d, + 0x1c, 0x2d, 0x17, 0xad, 0x80, 0x46, 0xed, 0x41, 0x1c, 0x9b, 0xe0, 0xd8, 0x12, 0xb5, 0x07, 0x51, + 0xe4, 0x25, 0xc8, 0x59, 0xba, 0xd7, 0x3f, 0xa4, 0x8e, 0x2b, 0xaf, 0x24, 0xfc, 0x34, 0x9e, 0xc9, + 0x96, 0x70, 0x92, 0x10, 0x85, 0x9b, 0x30, 0x17, 0x0b, 0x1a, 0xad, 0xbf, 0xa6, 0xbc, 0xd8, 0x9d, + 0x2a, 0x51, 0x21, 0xe1, 0x07, 0x2a, 0x2c, 0x72, 0xae, 0x8e, 0xe7, 0x50, 0xdd, 0x0a, 0x19, 0xaf, + 0x42, 0xa1, 0x7f, 0xe8, 0xdb, 0x77, 0x63, 0x94, 0xcb, 0xaf, 0x52, 0x5e, 0x63, 0x20, 0xc9, 0x1b, + 0xdd, 0x31, 0x11, 0x52, 0xe2, 0x0d, 0x42, 0xfa, 0x41, 0x05, 0xc4, 0x3f, 0xfc, 0xb6, 0x6e, 0xfa, + 0xd4, 0x0d, 0xd2, 0xff, 0x3f, 0x80, 0xb1, 0xf4, 0x78, 0xda, 0xf3, 0x24, 0x6f, 0x06, 0x52, 0x9b, + 0x71, 0x3b, 0x89, 0x37, 0xb8, 0x9d, 0xe4, 0xb4, 0xdb, 0xc1, 0xeb, 0xb0, 0x18, 0x0b, 0x46, 0xe6, + 0xe7, 0x2c, 0x14, 0x45, 0x34, 0xf7, 0xb9, 0x9d, 0x27, 0x28, 0x4f, 0x0a, 0xe6, 0x18, 0x8a, 0xef, + 0xc2, 0x42, 0xf8, 0x12, 0xdc, 0x77, 0x2c, 0x22, 0x7c, 0x59, 0xe6, 0x4c, 0x1e, 0x26, 0xa3, 0xac, + 0x41, 0x61, 0x9c, 0xb3, 0x20, 0x48, 0x08, 0x93, 0xe6, 0x62, 0x04, 0xda, 0xae, 0x4b, 0x9d, 0x8e, + 0xa7, 0x7b, 0x41, 0x88, 0xf8, 0x57, 0x15, 0x16, 0x22, 0x46, 0x49, 0x75, 0x0e, 0x4a, 0x86, 0x7d, + 0x40, 0x5d, 0xf6, 0x9a, 0x7b, 0x8e, 0xee, 0x89, 0x2b, 0x50, 0xc9, 0x5c, 0x68, 0x25, 0xba, 0x47, + 0xd9, 0x2d, 0xd9, 0xbe, 0xd5, 0x0b, 0xaf, 0x5d, 0x5d, 0x49, 0x91, 0xbc, 0xed, 0x5b, 0xe2, 0xb6, + 0xd9, 0xe7, 0xeb, 0x23, 0xa3, 0x37, 0xc1, 0x94, 0xe4, 0x4c, 0x9a, 0x3e, 0x32, 0x9a, 0x31, 0xb2, + 0x55, 0x58, 0x74, 0x7c, 0x93, 0x4e, 0xc2, 0x53, 0x1c, 0xbe, 0xc0, 0x5c, 0x31, 0x3c, 0xfe, 0x1a, + 0x16, 0x59, 0xe0, 0xcd, 0xeb, 0xf1, 0xd0, 0x97, 0x21, 0xeb, 0xbb, 0xd4, 0xe9, 0x19, 0x03, 0x29, + 0x9b, 0x0c, 0x5b, 0x36, 0x07, 0xe8, 0x02, 0xa4, 0x78, 0x21, 0x64, 0x61, 0x16, 0xd6, 0x4e, 0x05, + 0xea, 0x7c, 0xe5, 0xe3, 0x09, 0x87, 0xe1, 0x2d, 0x40, 0xcc, 0xe5, 0xc6, 0xd9, 0x3f, 0x84, 0xb4, + 0xcb, 0x0c, 0xf2, 0x8d, 0x9c, 0x8e, 0xb2, 0x4c, 0x44, 0x42, 0x04, 0x12, 0xff, 0xa2, 0x42, 0x55, + 0x54, 0x5b, 0x77, 0x73, 0xe8, 0x44, 0x1f, 0xf9, 0xbb, 0xd6, 0x09, 0x5a, 0x87, 0x62, 0x50, 0x46, + 0x7a, 0x2e, 0xf5, 0x64, 0xc1, 0x39, 0x31, 0xad, 0xe0, 0xb8, 0xa4, 0x10, 0x40, 0x3b, 0xd4, 0xc3, + 0x4d, 0xa8, 0xcd, 0x8c, 0x59, 0xa6, 0xe2, 0x3c, 0x64, 0x2c, 0x0e, 0x91, 0xb9, 0x28, 0xc5, 0x5b, + 0x0b, 0x91, 0x5e, 0x5c, 0x86, 0x93, 0x92, 0x2a, 0xe8, 0x36, 0x81, 0xf6, 0x5a, 0xb0, 0xfc, 0x8a, + 0x47, 0x92, 0xaf, 0x45, 0x3a, 0x97, 0xfa, 0x77, 0x9d, 0x6b, 0xdc, 0xb3, 0xf0, 0x6f, 0x2a, 0xcc, + 0x4f, 0xd4, 0x2a, 0x96, 0xab, 0x7d, 0x67, 0x68, 0x49, 0x51, 0x45, 0x65, 0x51, 0x62, 0xf6, 0xa6, + 0x34, 0x37, 0x07, 0x51, 0xdd, 0x24, 0x62, 0xba, 0xb9, 0x0a, 0x19, 0xfe, 0x86, 0x82, 0x7a, 0xbd, + 0x10, 0x4b, 0xdf, 0x8e, 0x6e, 0x38, 0x1b, 0x4b, 0xb2, 0x91, 0x17, 0xb9, 0xa9, 0x3e, 0xd0, 0x47, + 0x1e, 0x75, 0x88, 0xdc, 0x86, 0xde, 0x87, 0x8c, 0xa8, 0x95, 0xe5, 0x14, 0x27, 0x98, 0x0b, 0x08, + 
0xa2, 0xe5, 0x54, 0x42, 0xf0, 0x8f, 0x2a, 0xa4, 0x45, 0xe8, 0xef, 0x4a, 0x14, 0x15, 0xc8, 0x51, + 0xbb, 0x3f, 0x1c, 0x18, 0xf6, 0x01, 0x7f, 0x8b, 0x69, 0x12, 0xae, 0x11, 0x92, 0x6f, 0x84, 0x3d, + 0xba, 0xa2, 0x7c, 0x08, 0x65, 0x38, 0xd9, 0x75, 0x74, 0xdb, 0xdd, 0xa7, 0x0e, 0x0f, 0x2c, 0x54, + 0x00, 0xfe, 0x16, 0x60, 0x9c, 0xef, 0x48, 0x9e, 0xd4, 0x7f, 0x97, 0xa7, 0x55, 0xc8, 0xba, 0xba, + 0x35, 0x32, 0xc3, 0x0e, 0x12, 0x2a, 0xaa, 0xc3, 0xcd, 0x32, 0x53, 0x01, 0x08, 0x5f, 0x86, 0x7c, + 0x48, 0xcd, 0x22, 0x0f, 0x5b, 0x45, 0x91, 0xf0, 0xdf, 0x68, 0x09, 0xd2, 0xbc, 0x60, 0xf3, 0x44, + 0x14, 0x89, 0x58, 0xe0, 0x3a, 0x64, 0x04, 0xdf, 0xd8, 0x2f, 0x8a, 0x9b, 0x58, 0xb0, 0x62, 0x3f, + 0x25, 0x8b, 0x05, 0x2f, 0x52, 0x7f, 0xeb, 0x30, 0x17, 0x7b, 0x13, 0xb1, 0xae, 0xae, 0xbe, 0x56, + 0x57, 0x7f, 0x98, 0x80, 0x52, 0x5c, 0xc9, 0xe8, 0x32, 0xa4, 0xbc, 0xa3, 0x91, 0x88, 0xa6, 0xb4, + 0x76, 0x76, 0xba, 0xde, 0xe5, 0xb2, 0x7b, 0x34, 0xa2, 0x84, 0xc3, 0x99, 0x4e, 0xc4, 0x4b, 0xeb, + 0xed, 0xeb, 0x96, 0x61, 0x1e, 0x89, 0x96, 0x29, 0x34, 0xac, 0x09, 0xcf, 0x26, 0x77, 0xf0, 0xce, + 0x89, 0x20, 0x75, 0x48, 0xcd, 0x11, 0xbf, 0xe1, 0x3c, 0xe1, 0xbf, 0x99, 0xcd, 0xb7, 0x0d, 0xaf, + 0x9c, 0x16, 0x36, 0xf6, 0x1b, 0x1f, 0x01, 0x8c, 0x4f, 0x42, 0x05, 0xc8, 0xee, 0xb6, 0x3f, 0x6f, + 0x6f, 0xdf, 0x69, 0x6b, 0x0a, 0x5b, 0x5c, 0xdb, 0xde, 0x6d, 0x77, 0x1b, 0x44, 0x53, 0x51, 0x1e, + 0xd2, 0x5b, 0xf5, 0xdd, 0xad, 0x86, 0x96, 0x40, 0x73, 0x90, 0xbf, 0xd1, 0xec, 0x74, 0xb7, 0xb7, + 0x48, 0xbd, 0xa5, 0x25, 0x11, 0x82, 0x12, 0xf7, 0x8c, 0x6d, 0x29, 0xb6, 0xb5, 0xb3, 0xdb, 0x6a, + 0xd5, 0xc9, 0x97, 0x5a, 0x9a, 0x8d, 0x83, 0xcd, 0xf6, 0xe6, 0xb6, 0x96, 0x41, 0x45, 0xc8, 0x75, + 0xba, 0xf5, 0x6e, 0xa3, 0xd3, 0xe8, 0x6a, 0x59, 0xdc, 0x84, 0x8c, 0x38, 0xfa, 0xad, 0x25, 0x85, + 0x7b, 0x50, 0x8c, 0xe6, 0x1f, 0x9d, 0x8b, 0xa5, 0x38, 0xa4, 0xe3, 0xee, 0x48, 0x4a, 0x03, 0x31, + 0x89, 0x24, 0x4e, 0x88, 0x29, 0xc9, 0x8d, 0x52, 0x4c, 0xdf, 0xab, 0x50, 0x1a, 0xbf, 0x81, 0x4d, + 0xc3, 0xa4, 0xff, 0x45, 0xc9, 0xa9, 0x40, 0x6e, 0xdf, 0x30, 0x29, 0x8f, 0x41, 0x1c, 0x17, 0xae, + 0xa7, 0x3d, 0xd1, 0xf7, 0x3e, 0x83, 0x7c, 0xf8, 0x09, 0xec, 0x46, 0x1a, 0xb7, 0x76, 0xeb, 0x37, + 0x35, 0x85, 0xdd, 0x48, 0x7b, 0xbb, 0xdb, 0x13, 0x4b, 0x15, 0xcd, 0x43, 0x81, 0x34, 0xb6, 0x1a, + 0x5f, 0xf4, 0x5a, 0xf5, 0xee, 0xb5, 0x1b, 0x5a, 0x82, 0x5d, 0x91, 0x30, 0xb4, 0xb7, 0xa5, 0x2d, + 0xb9, 0xf6, 0x67, 0x1a, 0x72, 0x41, 0x8c, 0x4c, 0x92, 0x3b, 0xbe, 0x7b, 0x88, 0x96, 0xa6, 0xfd, + 0xc5, 0x51, 0x39, 0x31, 0x61, 0x95, 0x65, 0x41, 0x41, 0x1f, 0x43, 0x9a, 0x8f, 0x99, 0x68, 0xea, + 0xd8, 0x5e, 0x99, 0x3e, 0x8c, 0x63, 0x05, 0x5d, 0x87, 0x42, 0x64, 0x3c, 0x9d, 0xb1, 0xfb, 0x74, + 0xcc, 0x1a, 0x9f, 0x64, 0xb1, 0x72, 0x49, 0x45, 0x37, 0xa0, 0x10, 0x19, 0xe2, 0x50, 0x25, 0x26, + 0x9a, 0xd8, 0x98, 0x39, 0xe6, 0x9a, 0x32, 0xf5, 0x61, 0x05, 0x35, 0x00, 0xc6, 0x73, 0x16, 0x3a, + 0x15, 0x03, 0x47, 0x07, 0xbd, 0x4a, 0x65, 0x9a, 0x2b, 0xa4, 0xd9, 0x80, 0x7c, 0x38, 0x65, 0xa0, + 0xf2, 0x94, 0xc1, 0x43, 0x90, 0xcc, 0x1e, 0x49, 0xb0, 0x82, 0x36, 0xa1, 0x58, 0x37, 0xcd, 0xd7, + 0xa1, 0xa9, 0x44, 0x3d, 0xee, 0x24, 0x8f, 0x19, 0xf6, 0xdc, 0xc9, 0xc6, 0x8e, 0xce, 0xc7, 0x2b, + 0xce, 0xac, 0x69, 0xa5, 0xf2, 0xff, 0x7f, 0xc4, 0x85, 0xa7, 0x75, 0x61, 0x7e, 0xa2, 0xc3, 0xa3, + 0xea, 0xc4, 0xee, 0x89, 0xa1, 0xa0, 0x52, 0x9b, 0xe9, 0x0f, 0x59, 0x5b, 0x50, 0x8a, 0x77, 0x24, + 0x34, 0xeb, 0x6f, 0x95, 0x4a, 0x78, 0xda, 0x8c, 0x16, 0xa6, 0xac, 0xa8, 0x6b, 0x4d, 0xd0, 0x98, + 0xc8, 0xb7, 0x6d, 0xf3, 0xe8, 0x2d, 0x85, 0xbf, 0x71, 0xe5, 0xf1, 0xb3, 0xaa, 0xf2, 0xe4, 0x59, + 0x55, 0x79, 0xf9, 0xac, 
0xaa, 0x7e, 0x77, 0x5c, 0x55, 0x7f, 0x3a, 0xae, 0xaa, 0x8f, 0x8e, 0xab, + 0xea, 0xe3, 0xe3, 0xaa, 0xfa, 0xc7, 0x71, 0x55, 0x7d, 0x71, 0x5c, 0x55, 0x5e, 0x1e, 0x57, 0xd5, + 0x07, 0xcf, 0xab, 0xca, 0xe3, 0xe7, 0x55, 0xe5, 0xc9, 0xf3, 0xaa, 0xf2, 0x55, 0xa6, 0x6f, 0x1a, + 0xd4, 0xf6, 0xf6, 0x32, 0xfc, 0xff, 0x09, 0x1f, 0xfd, 0x15, 0x00, 0x00, 0xff, 0xff, 0x57, 0x48, + 0x9d, 0xc0, 0x96, 0x10, 0x00, 0x00, } func (x MatchType) String() string { @@ -1793,6 +1804,9 @@ func (this *WriteRequest) Equal(that interface{}) bool { return false } } + if this.SkipLabelNameValidation != that1.SkipLabelNameValidation { + return false + } return true } func (this *WriteResponse) Equal(that interface{}) bool { @@ -2660,13 +2674,14 @@ func (this *WriteRequest) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 7) + s := make([]string, 0, 8) s = append(s, "&client.WriteRequest{") s = append(s, "Timeseries: "+fmt.Sprintf("%#v", this.Timeseries)+",\n") s = append(s, "Source: "+fmt.Sprintf("%#v", this.Source)+",\n") if this.Metadata != nil { s = append(s, "Metadata: "+fmt.Sprintf("%#v", this.Metadata)+",\n") } + s = append(s, "SkipLabelNameValidation: "+fmt.Sprintf("%#v", this.SkipLabelNameValidation)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -3510,6 +3525,78 @@ var _Ingester_serviceDesc = grpc.ServiceDesc{ Metadata: "cortex.proto", } +// PushOnlyIngesterClient is the client API for PushOnlyIngester service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type PushOnlyIngesterClient interface { + Push(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) +} + +type pushOnlyIngesterClient struct { + cc *grpc.ClientConn +} + +func NewPushOnlyIngesterClient(cc *grpc.ClientConn) PushOnlyIngesterClient { + return &pushOnlyIngesterClient{cc} +} + +func (c *pushOnlyIngesterClient) Push(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) { + out := new(WriteResponse) + err := c.cc.Invoke(ctx, "/cortex.PushOnlyIngester/Push", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// PushOnlyIngesterServer is the server API for PushOnlyIngester service. +type PushOnlyIngesterServer interface { + Push(context.Context, *WriteRequest) (*WriteResponse, error) +} + +// UnimplementedPushOnlyIngesterServer can be embedded to have forward compatible implementations. 
+type UnimplementedPushOnlyIngesterServer struct { +} + +func (*UnimplementedPushOnlyIngesterServer) Push(ctx context.Context, req *WriteRequest) (*WriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Push not implemented") +} + +func RegisterPushOnlyIngesterServer(s *grpc.Server, srv PushOnlyIngesterServer) { + s.RegisterService(&_PushOnlyIngester_serviceDesc, srv) +} + +func _PushOnlyIngester_Push_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WriteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PushOnlyIngesterServer).Push(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cortex.PushOnlyIngester/Push", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PushOnlyIngesterServer).Push(ctx, req.(*WriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _PushOnlyIngester_serviceDesc = grpc.ServiceDesc{ + ServiceName: "cortex.PushOnlyIngester", + HandlerType: (*PushOnlyIngesterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Push", + Handler: _PushOnlyIngester_Push_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "cortex.proto", +} + func (m *WriteRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3530,6 +3617,18 @@ func (m *WriteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.SkipLabelNameValidation { + i-- + if m.SkipLabelNameValidation { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x3e + i-- + dAtA[i] = 0xc0 + } if len(m.Metadata) > 0 { for iNdEx := len(m.Metadata) - 1; iNdEx >= 0; iNdEx-- { { @@ -4730,6 +4829,9 @@ func (m *WriteRequest) Size() (n int) { n += 1 + l + sovCortex(uint64(l)) } } + if m.SkipLabelNameValidation { + n += 3 + } return n } @@ -5246,6 +5348,7 @@ func (this *WriteRequest) String() string { `Timeseries:` + fmt.Sprintf("%v", this.Timeseries) + `,`, `Source:` + fmt.Sprintf("%v", this.Source) + `,`, `Metadata:` + repeatedStringForMetadata + `,`, + `SkipLabelNameValidation:` + fmt.Sprintf("%v", this.SkipLabelNameValidation) + `,`, `}`, }, "") return s @@ -5754,6 +5857,26 @@ func (m *WriteRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 1000: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SkipLabelNameValidation", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCortex + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.SkipLabelNameValidation = bool(v != 0) default: iNdEx = preIndex skippy, err := skipCortex(dAtA[iNdEx:]) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto index ebc0f92aa1c..acba85a29f5 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/client/cortex.proto @@ -25,6 +25,10 @@ service Ingester { rpc TransferChunks(stream TimeSeriesChunk) returns (TransferChunksResponse) {}; } +service PushOnlyIngester { + rpc Push(WriteRequest) returns (WriteResponse) {}; +} + message WriteRequest { repeated TimeSeries timeseries = 1 [(gogoproto.nullable) = false, 
(gogoproto.customtype) = "PreallocTimeseries"]; enum SourceEnum { @@ -33,6 +37,7 @@ message WriteRequest { } SourceEnum Source = 2; repeated MetricMetadata metadata = 3 [(gogoproto.nullable) = true]; + bool skip_label_name_validation = 1000; //set intentionally high to keep WriteRequest compatible with upstream Prometheus } message WriteResponse {} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go index fd26a08da5e..bdfd0101b11 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/flush.go @@ -14,6 +14,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" ) const ( @@ -32,9 +33,9 @@ func (i *Ingester) Flush() { return } - level.Info(util.Logger).Log("msg", "starting to flush all the chunks") + level.Info(i.logger).Log("msg", "starting to flush all the chunks") i.sweepUsers(true) - level.Info(util.Logger).Log("msg", "chunks queued for flushing") + level.Info(i.logger).Log("msg", "chunks queued for flushing") // Close the flush queues, to unblock waiting workers. for _, flushQueue := range i.flushQueues { @@ -42,7 +43,7 @@ func (i *Ingester) Flush() { } i.flushQueuesDone.Wait() - level.Info(util.Logger).Log("msg", "flushing of chunks complete") + level.Info(i.logger).Log("msg", "flushing of chunks complete") } // FlushHandler triggers a flush of all in memory chunks. Mainly used for @@ -53,9 +54,9 @@ func (i *Ingester) FlushHandler(w http.ResponseWriter, r *http.Request) { return } - level.Info(util.Logger).Log("msg", "starting to flush all the chunks") + level.Info(i.logger).Log("msg", "starting to flush all the chunks") i.sweepUsers(true) - level.Info(util.Logger).Log("msg", "chunks queued for flushing") + level.Info(i.logger).Log("msg", "chunks queued for flushing") w.WriteHeader(http.StatusNoContent) } @@ -114,7 +115,7 @@ func (i *Ingester) setFlushRate() { if flushesPerSecond*i.cfg.FlushCheckPeriod.Seconds() < minFlushes { flushesPerSecond = minFlushes / i.cfg.FlushCheckPeriod.Seconds() } - level.Debug(util.Logger).Log("msg", "computed flush rate", "rate", flushesPerSecond) + level.Debug(i.logger).Log("msg", "computed flush rate", "rate", flushesPerSecond) i.flushRateLimiter.SetLimit(rate.Limit(flushesPerSecond)) } @@ -246,7 +247,7 @@ func (i *Ingester) shouldFlushChunk(c *desc, fp model.Fingerprint, lastValueIsSt func (i *Ingester) flushLoop(j int) { defer func() { - level.Debug(util.Logger).Log("msg", "Ingester.flushLoop() exited") + level.Debug(i.logger).Log("msg", "Ingester.flushLoop() exited") i.flushQueuesDone.Done() }() @@ -263,7 +264,7 @@ func (i *Ingester) flushLoop(j int) { outcome, err := i.flushUserSeries(j, op.userID, op.fp, op.immediate) i.metrics.seriesDequeuedOutcome.WithLabelValues(outcome.String()).Inc() if err != nil { - level.Error(util.WithUserID(op.userID, util.Logger)).Log("msg", "failed to flush user", "err", err) + level.Error(log.WithUserID(op.userID, i.logger)).Log("msg", "failed to flush user", "err", err) } // If we're exiting & we failed to flush, put the failed operation diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go index 09196efd596..a0a49b0607c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester.go @@ -9,6 +9,7 
@@ import ( "sync" "time" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/gogo/status" "github.com/pkg/errors" @@ -26,6 +27,7 @@ import ( "github.com/cortexproject/cortex/pkg/storage/tsdb" "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" + logutil "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" @@ -50,7 +52,7 @@ var ( // Config for an Ingester. type Config struct { - WALConfig WALConfig `yaml:"walconfig"` + WALConfig WALConfig `yaml:"walconfig" doc:"description=Configures the Write-Ahead Log (WAL) for the Cortex chunks storage. This config is ignored when running the Cortex blocks storage."` LifecyclerConfig ring.LifecyclerConfig `yaml:"lifecycler"` // Config for transferring chunks. Zero or negative = no retries. @@ -123,6 +125,7 @@ type Ingester struct { clientConfig client.Config metrics *ingesterMetrics + logger log.Logger chunkStore ChunkStore lifecycler *ring.Lifecycler @@ -165,13 +168,13 @@ type ChunkStore interface { } // New constructs a new Ingester. -func New(cfg Config, clientConfig client.Config, limits *validation.Overrides, chunkStore ChunkStore, registerer prometheus.Registerer) (*Ingester, error) { +func New(cfg Config, clientConfig client.Config, limits *validation.Overrides, chunkStore ChunkStore, registerer prometheus.Registerer, logger log.Logger) (*Ingester, error) { if cfg.ingesterClientFactory == nil { cfg.ingesterClientFactory = client.MakeIngesterClient } if cfg.BlocksStorageEnabled { - return NewV2(cfg, clientConfig, limits, registerer) + return NewV2(cfg, clientConfig, limits, registerer, logger) } if cfg.WALConfig.WALEnabled { @@ -207,6 +210,7 @@ func New(cfg Config, clientConfig client.Config, limits *validation.Overrides, c flushRateLimiter: rate.NewLimiter(rate.Inf, 1), usersMetadata: map[string]*userMetricsMetadata{}, registerer: registerer, + logger: logger, } var err error @@ -235,24 +239,24 @@ func New(cfg Config, clientConfig client.Config, limits *validation.Overrides, c func (i *Ingester) starting(ctx context.Context) error { if i.cfg.WALConfig.Recover { - level.Info(util.Logger).Log("msg", "recovering from WAL") + level.Info(i.logger).Log("msg", "recovering from WAL") start := time.Now() if err := recoverFromWAL(i); err != nil { - level.Error(util.Logger).Log("msg", "failed to recover from WAL", "time", time.Since(start).String()) + level.Error(i.logger).Log("msg", "failed to recover from WAL", "time", time.Since(start).String()) return errors.Wrap(err, "failed to recover from WAL") } elapsed := time.Since(start) - level.Info(util.Logger).Log("msg", "recovery from WAL completed", "time", elapsed.String()) + level.Info(i.logger).Log("msg", "recovery from WAL completed", "time", elapsed.String()) i.metrics.walReplayDuration.Set(elapsed.Seconds()) } // If the WAL recover happened, then the userStates would already be set. if i.userStates == nil { - i.userStates = newUserStates(i.limiter, i.cfg, i.metrics) + i.userStates = newUserStates(i.limiter, i.cfg, i.metrics, i.logger) } var err error - i.wal, err = newWAL(i.cfg.WALConfig, i.userStates.cp, i.registerer) + i.wal, err = newWAL(i.cfg.WALConfig, i.userStates.cp, i.registerer, i.logger) if err != nil { return errors.Wrap(err, "starting WAL") } @@ -283,9 +287,9 @@ func (i *Ingester) startFlushLoops() { // Compared to the 'New' method: // * Always replays the WAL. 
// * Does not start the lifecycler. -func NewForFlusher(cfg Config, chunkStore ChunkStore, limits *validation.Overrides, registerer prometheus.Registerer) (*Ingester, error) { +func NewForFlusher(cfg Config, chunkStore ChunkStore, limits *validation.Overrides, registerer prometheus.Registerer, logger log.Logger) (*Ingester, error) { if cfg.BlocksStorageEnabled { - return NewV2ForFlusher(cfg, registerer) + return NewV2ForFlusher(cfg, registerer, logger) } i := &Ingester{ @@ -296,6 +300,7 @@ func NewForFlusher(cfg Config, chunkStore ChunkStore, limits *validation.Overrid flushRateLimiter: rate.NewLimiter(rate.Inf, 1), wal: &noopWAL{}, limits: limits, + logger: logger, } i.BasicService = services.NewBasicService(i.startingForFlusher, i.loopForFlusher, i.stopping) @@ -303,17 +308,17 @@ func NewForFlusher(cfg Config, chunkStore ChunkStore, limits *validation.Overrid } func (i *Ingester) startingForFlusher(ctx context.Context) error { - level.Info(util.Logger).Log("msg", "recovering from WAL") + level.Info(i.logger).Log("msg", "recovering from WAL") // We recover from WAL always. start := time.Now() if err := recoverFromWAL(i); err != nil { - level.Error(util.Logger).Log("msg", "failed to recover from WAL", "time", time.Since(start).String()) + level.Error(i.logger).Log("msg", "failed to recover from WAL", "time", time.Since(start).String()) return err } elapsed := time.Since(start) - level.Info(util.Logger).Log("msg", "recovery from WAL completed", "time", elapsed.String()) + level.Info(i.logger).Log("msg", "recovery from WAL completed", "time", elapsed.String()) i.metrics.walReplayDuration.Set(elapsed.Seconds()) i.startFlushLoops() @@ -605,7 +610,7 @@ func (i *Ingester) pushMetadata(ctx context.Context, userID string, metadata []* // If we have any error with regard to metadata we just log and no-op. // We consider metadata a best-effort approach; errors here should not stop processing. if firstMetadataErr != nil { - logger := util.WithContext(ctx, util.Logger) + logger := logutil.WithContext(ctx, i.logger) level.Warn(logger).Log("msg", "failed to ingest some metadata", "err", firstMetadataErr) } } @@ -647,6 +652,19 @@ func (i *Ingester) getUserMetadata(userID string) *userMetricsMetadata { return i.usersMetadata[userID] } +func (i *Ingester) deleteUserMetadata(userID string) { + i.usersMetadataMtx.Lock() + um := i.usersMetadata[userID] + delete(i.usersMetadata, userID) + i.usersMetadataMtx.Unlock() + + if um != nil { + // We need to call purge to update i.metrics.memMetadata correctly (it counts number of metrics with metadata in memory). + // Passing zero time means purge everything.
+ um.purge(time.Time{}) + } +} + func (i *Ingester) getUsersWithMetadata() []string { i.usersMetadataMtx.RLock() defer i.usersMetadataMtx.RUnlock() @@ -750,8 +768,8 @@ func (i *Ingester) QueryStream(req *client.QueryRequest, stream client.Ingester_ return i.v2QueryStream(req, stream) } - log, ctx := spanlogger.New(stream.Context(), "QueryStream") - defer log.Finish() + spanLog, ctx := spanlogger.New(stream.Context(), "QueryStream") + defer spanLog.Finish() from, through, matchers, err := client.FromQueryRequest(req) if err != nil { @@ -819,8 +837,8 @@ func (i *Ingester) QueryStream(req *client.QueryRequest, stream client.Ingester_ i.metrics.queriedSeries.Observe(float64(numSeries)) i.metrics.queriedChunks.Observe(float64(numChunks)) - level.Debug(log).Log("streams", numSeries) - level.Debug(log).Log("chunks", numChunks) + level.Debug(spanLog).Log("streams", numSeries) + level.Debug(spanLog).Log("chunks", numChunks) return err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go index d87396ca3d2..aaadd488cfd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/ingester_v2.go @@ -11,6 +11,7 @@ import ( "sync" "time" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/oklog/ulid" "github.com/pkg/errors" @@ -35,6 +36,7 @@ import ( "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/concurrency" "github.com/cortexproject/cortex/pkg/util/extract" + logutil "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" @@ -52,7 +54,8 @@ type Shipper interface { type tsdbState int const ( - active tsdbState = iota // Pushes are allowed only in this state. + active tsdbState = iota // Pushes are allowed. + activeShipping // Pushes are allowed. Blocks shipping is in progress. forceCompacting // TSDB is being force-compacted. closing // Used while closing idle TSDB. closed // Used to avoid setting closing back to active in closeAndDeleteIdleUsers method. @@ -89,7 +92,7 @@ type userTSDB struct { stateMtx sync.RWMutex state tsdbState - pushesInFlight sync.WaitGroup // Increased with Read lock held, only if state == active. + pushesInFlight sync.WaitGroup // Increased with stateMtx read lock held, only if state == active or activeShipping. // Used to detect idle TSDBs. lastUpdate atomic.Int64 @@ -107,6 +110,10 @@ type userTSDB struct { // for statistics ingestedAPISamples *ewmaRate ingestedRuleSamples *ewmaRate + + // Cached shipped blocks. + shippedBlocksMtx sync.Mutex + shippedBlocks map[ulid.ULID]struct{} } // Explicitly wrapping the tsdb.DB functions that we use. @@ -153,7 +160,7 @@ func (u *userTSDB) casState(from, to tsdbState) bool { // compactHead compacts the Head block at specified block durations avoiding a single huge block. 
func (u *userTSDB) compactHead(blockDuration int64) error { if !u.casState(active, forceCompacting) { - return errors.New("TSDB head cannot be compacted because it is not in active state (possibly being closed)") + return errors.New("TSDB head cannot be compacted because it is not in active state (possibly being closed or blocks shipping in progress)") } defer u.casState(forceCompacting, active) @@ -236,15 +243,10 @@ func (u *userTSDB) blocksToDelete(blocks []*tsdb.Block) map[ulid.ULID]struct{} { return deletable } - shippedBlocks, err := u.getShippedBlocks() - if err != nil { - // If there is any issue with the shipper, we should be conservative and not delete anything. - level.Error(util.Logger).Log("msg", "failed to read shipper meta during deletion of blocks", "user", u.userID, "err", err) - return nil - } + shippedBlocks := u.getCachedShippedBlocks() result := map[ulid.ULID]struct{}{} - for _, shippedID := range shippedBlocks { + for shippedID := range shippedBlocks { if _, ok := deletable[shippedID]; ok { result[shippedID] = struct{}{} } @@ -252,13 +254,56 @@ func (u *userTSDB) blocksToDelete(blocks []*tsdb.Block) map[ulid.ULID]struct{} { return result } -func (u *userTSDB) getShippedBlocks() ([]ulid.ULID, error) { +// updateCachedShipperBlocks reads the shipper meta file and updates the cached shipped blocks. +func (u *userTSDB) updateCachedShippedBlocks() error { shipperMeta, err := shipper.ReadMetaFile(u.db.Dir()) - if err != nil { - return nil, err + if os.IsNotExist(err) { + // If the meta file doesn't exist it means the shipper hasn't run yet. + shipperMeta = &shipper.Meta{} + } else if err != nil { + return err + } + + // Build a map. + shippedBlocks := make(map[ulid.ULID]struct{}, len(shipperMeta.Uploaded)) + for _, blockID := range shipperMeta.Uploaded { + shippedBlocks[blockID] = struct{}{} + } + + // Cache it. + u.shippedBlocksMtx.Lock() + u.shippedBlocks = shippedBlocks + u.shippedBlocksMtx.Unlock() + + return nil +} + +// getCachedShippedBlocks returns the cached shipped blocks. +func (u *userTSDB) getCachedShippedBlocks() map[ulid.ULID]struct{} { + u.shippedBlocksMtx.Lock() + defer u.shippedBlocksMtx.Unlock() + + // It's safe to directly return the map because it's never updated in-place. + return u.shippedBlocks +} + +// getOldestUnshippedBlockTime returns the unix timestamp with milliseconds precision of the oldest +// TSDB block not shipped to the storage yet, or 0 if all blocks have been shipped. +func (u *userTSDB) getOldestUnshippedBlockTime() uint64 { + shippedBlocks := u.getCachedShippedBlocks() + oldestTs := uint64(0) + + for _, b := range u.Blocks() { + if _, ok := shippedBlocks[b.Meta().ULID]; ok { + continue + } + + if oldestTs == 0 || b.Meta().ULID.Time() < oldestTs { + oldestTs = b.Meta().ULID.Time() + } } - return shipperMeta.Uploaded, nil + return oldestTs } func (u *userTSDB) isIdle(now time.Time, idle time.Duration) bool { @@ -286,21 +331,9 @@ func (u *userTSDB) shouldCloseTSDB(idleTimeout time.Duration) (tsdbCloseCheckRes return tsdbNotCompacted, nil } - // Verify that all blocks have been shipped. - shipped, err := u.getShippedBlocks() - if err != nil { - return tsdbCheckFailed, errors.Wrapf(err, "failed to read shipper meta") - } - - shippedMap := make(map[ulid.ULID]bool, len(shipped)) - for _, b := range shipped { - shippedMap[b] = true - } - - for _, b := range u.Blocks() { - if !shippedMap[b.Meta().ULID] { - return tsdbNotShipped, nil - } + // Ensure that all blocks have been shipped. 
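+ // (Per getOldestUnshippedBlockTime above: 0 means every block has been shipped, + // so any non-zero value here means at least one block is still waiting to be uploaded.)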
+ if oldest := u.getOldestUnshippedBlockTime(); oldest > 0 { + return tsdbNotShipped, nil } return tsdbIdle, nil @@ -390,8 +423,8 @@ func newTSDBState(bucketClient objstore.Bucket, registerer prometheus.Registerer } // NewV2 returns a new Ingester that uses Cortex block storage instead of chunks storage. -func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, registerer prometheus.Registerer) (*Ingester, error) { - bucketClient, err := bucket.NewClient(context.Background(), cfg.BlocksStorageConfig.Bucket, "ingester", util.Logger, registerer) +func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, registerer prometheus.Registerer, logger log.Logger) (*Ingester, error) { + bucketClient, err := bucket.NewClient(context.Background(), cfg.BlocksStorageConfig.Bucket, "ingester", logger, registerer) if err != nil { return nil, errors.Wrap(err, "failed to create the bucket client") } @@ -405,16 +438,23 @@ func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, usersMetadata: map[string]*userMetricsMetadata{}, wal: &noopWAL{}, TSDBState: newTSDBState(bucketClient, registerer), + logger: logger, } // Replace specific metrics which we can't directly track but we need to read // them from the underlying system (ie. TSDB). if registerer != nil { registerer.Unregister(i.metrics.memSeries) + promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ Name: "cortex_ingester_memory_series", Help: "The current number of series in memory.", - }, i.numSeriesInTSDB) + }, i.getMemorySeriesMetric) + + promauto.With(registerer).NewGaugeFunc(prometheus.GaugeOpts{ + Name: "cortex_ingester_oldest_unshipped_block_timestamp_seconds", + Help: "Unix timestamp of the oldest TSDB block not shipped to the storage yet. 0 if ingester has no blocks or all blocks have been shipped.", + }, i.getOldestUnshippedBlockMetric) } i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester", ring.IngesterRingKey, cfg.BlocksStorageConfig.TSDB.FlushBlocksOnShutdown, registerer) @@ -433,8 +473,6 @@ func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, cfg.LifecyclerConfig.RingConfig.ReplicationFactor, cfg.LifecyclerConfig.RingConfig.ZoneAwarenessEnabled) - i.userStates = newUserStates(i.limiter, cfg, i.metrics) - i.TSDBState.shipperIngesterID = i.lifecycler.ID i.BasicService = services.NewBasicService(i.startingV2, i.updateLoop, i.stoppingV2) @@ -443,8 +481,8 @@ func NewV2(cfg Config, clientConfig client.Config, limits *validation.Overrides, // Special version of ingester used by Flusher. This ingester is not ingesting anything, its only purpose is to react // to the Flush method and flush all opened TSDBs when called.
-func NewV2ForFlusher(cfg Config, registerer prometheus.Registerer) (*Ingester, error) { - bucketClient, err := bucket.NewClient(context.Background(), cfg.BlocksStorageConfig.Bucket, "ingester", util.Logger, registerer) +func NewV2ForFlusher(cfg Config, registerer prometheus.Registerer, logger log.Logger) (*Ingester, error) { + bucketClient, err := bucket.NewClient(context.Background(), cfg.BlocksStorageConfig.Bucket, "ingester", logger, registerer) if err != nil { return nil, errors.Wrap(err, "failed to create the bucket client") } @@ -454,6 +492,7 @@ func NewV2ForFlusher(cfg Config, registerer prometheus.Registerer) (*Ingester, e metrics: newIngesterMetrics(registerer, false, false), wal: &noopWAL{}, TSDBState: newTSDBState(bucketClient, registerer), + logger: logger, } i.TSDBState.shipperIngesterID = "flusher" @@ -498,7 +537,7 @@ func (i *Ingester) startingV2(ctx context.Context) error { compactionService := services.NewBasicService(nil, i.compactionLoop, nil) servs = append(servs, compactionService) - if i.cfg.BlocksStorageConfig.TSDB.ShipInterval > 0 { + if i.cfg.BlocksStorageConfig.TSDB.IsBlocksShippingEnabled() { shippingService := services.NewBasicService(nil, i.shipBlocksLoop, nil) servs = append(servs, shippingService) } @@ -534,12 +573,12 @@ func (i *Ingester) stoppingV2(_ error) error { // there's no shipping on-going. if err := services.StopManagerAndAwaitStopped(context.Background(), i.TSDBState.subservices); err != nil { - level.Warn(util.Logger).Log("msg", "failed to stop ingester subservices", "err", err) + level.Warn(i.logger).Log("msg", "failed to stop ingester subservices", "err", err) } // Next initiate our graceful exit from the ring. if err := services.StopAndAwaitTerminated(context.Background(), i.lifecycler); err != nil { - level.Warn(util.Logger).Log("msg", "failed to stop ingester lifecycler", "err", err) + level.Warn(i.logger).Log("msg", "failed to stop ingester lifecycler", "err", err) } if !i.cfg.BlocksStorageConfig.TSDB.KeepUserTSDBOpenOnShutdown { @@ -742,7 +781,7 @@ func (i *Ingester) v2Push(ctx context.Context, req *client.WriteRequest) (*clien // The error looks an issue on our side, so we should rollback if rollbackErr := app.Rollback(); rollbackErr != nil { - level.Warn(util.Logger).Log("msg", "failed to rollback on error", "user", userID, "err", rollbackErr) + level.Warn(i.logger).Log("msg", "failed to rollback on error", "user", userID, "err", rollbackErr) } return nil, wrapWithUser(err, userID) @@ -768,7 +807,10 @@ func (i *Ingester) v2Push(ctx context.Context, req *client.WriteRequest) (*clien } i.TSDBState.appenderCommitDuration.Observe(time.Since(startCommit).Seconds()) - db.setLastUpdate(time.Now()) + // If only invalid samples are pushed, don't change "last update", as TSDB was not modified. + if succeededSamplesCount > 0 { + db.setLastUpdate(time.Now()) + } // Increment metrics only if the samples have been successfully committed. // If the code didn't reach this point, it means that we returned an error @@ -801,15 +843,16 @@ func (u *userTSDB) acquireAppendLock() error { u.stateMtx.RLock() defer u.stateMtx.RUnlock() - if u.state != active { - switch u.state { - case forceCompacting: - return errors.New("forced compaction in progress") - case closing: - return errors.New("TSDB is closing") - default: - return errors.New("TSDB is not active") - } + switch u.state { + case active: + case activeShipping: + // Pushes are allowed. 
+ case forceCompacting: + return errors.New("forced compaction in progress") + case closing: + return errors.New("TSDB is closing") + default: + return errors.New("TSDB is not active") } u.pushesInFlight.Add(1) @@ -1048,8 +1091,8 @@ const queryStreamBatchMessageSize = 1 * 1024 * 1024 // v2QueryStream streams metrics from a TSDB. This implements the client.IngesterServer interface func (i *Ingester) v2QueryStream(req *client.QueryRequest, stream client.Ingester_QueryStreamServer) error { - log, ctx := spanlogger.New(stream.Context(), "v2QueryStream") - defer log.Finish() + spanlog, ctx := spanlogger.New(stream.Context(), "v2QueryStream") + defer spanlog.Finish() userID, err := tenant.TenantID(ctx) if err != nil { @@ -1136,7 +1179,7 @@ func (i *Ingester) v2QueryStream(req *client.QueryRequest, stream client.Ingeste i.metrics.queriedSeries.Observe(float64(numSeries)) i.metrics.queriedSamples.Observe(float64(numSamples)) - level.Debug(log).Log("series", numSeries, "samples", numSamples) + level.Debug(spanlog).Log("series", numSeries, "samples", numSamples) return nil } @@ -1204,7 +1247,7 @@ func (i *Ingester) getOrCreateTSDB(userID string, force bool) (*userTSDB, error) func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { tsdbPromReg := prometheus.NewRegistry() udir := i.cfg.BlocksStorageConfig.TSDB.BlocksDir(userID) - userLogger := util.WithUserID(userID, util.Logger) + userLogger := logutil.WithUserID(userID, i.logger) blockRanges := i.cfg.BlocksStorageConfig.TSDB.BlockRanges.ToMilliseconds() @@ -1272,7 +1315,7 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { } // Create a new shipper for this database - if i.cfg.BlocksStorageConfig.TSDB.ShipInterval > 0 { + if i.cfg.BlocksStorageConfig.TSDB.IsBlocksShippingEnabled() { userDB.shipper = shipper.New( userLogger, tsdbPromReg, @@ -1283,6 +1326,11 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { false, // No need to upload compacted blocks. Cortex compactor takes care of that. true, // Allow out of order uploads. It's fine in Cortex's context. ) + + // Initialise the shipper blocks cache. + if err := userDB.updateCachedShippedBlocks(); err != nil { + level.Error(userLogger).Log("msg", "failed to update cached shipped blocks after shipper initialisation", "err", err) + } } i.TSDBState.tsdbMetrics.setRegistryForUser(userID, tsdbPromReg) @@ -1303,7 +1351,7 @@ func (i *Ingester) closeAllTSDB() { defer wg.Done() if err := db.Close(); err != nil { - level.Warn(util.Logger).Log("msg", "unable to close TSDB", "err", err, "user", userID) + level.Warn(i.logger).Log("msg", "unable to close TSDB", "err", err, "user", userID) return } @@ -1314,6 +1362,9 @@ func (i *Ingester) closeAllTSDB() { i.userStatesMtx.Lock() delete(i.TSDBState.dbs, userID) i.userStatesMtx.Unlock() + + i.metrics.memUsers.Dec() + i.metrics.activeSeriesPerUser.DeleteLabelValues(userID) }(userDB) } @@ -1325,7 +1376,7 @@ func (i *Ingester) closeAllTSDB() { // openExistingTSDB walks the user tsdb dir, and opens a tsdb for each user. This may start a WAL replay, so we limit the number of // concurrently opening TSDB. 
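The function below implements that bound with a fixed pool of errgroup workers draining a channel of user IDs. The skeleton of the pattern, sketched with illustrative names:

package example

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// openAll opens one TSDB per user with at most `concurrency` opens in flight.
func openAll(ctx context.Context, userIDs []string, concurrency int, open func(string) error) error {
	group, gctx := errgroup.WithContext(ctx)
	queue := make(chan string)

	// Fixed pool of workers; the first error cancels gctx and stops the rest.
	for n := 0; n < concurrency; n++ {
		group.Go(func() error {
			for id := range queue {
				if err := open(id); err != nil {
					return err
				}
			}
			return nil
		})
	}

	// Producer: stops early if a worker has already failed.
	group.Go(func() error {
		defer close(queue)
		for _, id := range userIDs {
			select {
			case queue <- id:
			case <-gctx.Done():
				return gctx.Err()
			}
		}
		return nil
	})

	return group.Wait()
}
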
func (i *Ingester) openExistingTSDB(ctx context.Context) error { - level.Info(util.Logger).Log("msg", "opening existing TSDBs") + level.Info(i.logger).Log("msg", "opening existing TSDBs") queue := make(chan string) group, groupCtx := errgroup.WithContext(ctx) @@ -1338,7 +1389,7 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error { db, err := i.createTSDB(userID) if err != nil { - level.Error(util.Logger).Log("msg", "unable to open TSDB", "err", err, "user", userID) + level.Error(i.logger).Log("msg", "unable to open TSDB", "err", err, "user", userID) return errors.Wrapf(err, "unable to open TSDB for user %s", userID) } @@ -1367,7 +1418,7 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error { return filepath.SkipDir } - level.Error(util.Logger).Log("msg", "an error occurred while iterating the filesystem storing TSDBs", "path", path, "err", err) + level.Error(i.logger).Log("msg", "an error occurred while iterating the filesystem storing TSDBs", "path", path, "err", err) return errors.Wrapf(err, "an error occurred while iterating the filesystem storing TSDBs at %s", path) } @@ -1380,7 +1431,7 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error { userID := info.Name() f, err := os.Open(path) if err != nil { - level.Error(util.Logger).Log("msg", "unable to open TSDB dir", "err", err, "user", userID, "path", path) + level.Error(i.logger).Log("msg", "unable to open TSDB dir", "err", err, "user", userID, "path", path) return errors.Wrapf(err, "unable to open TSDB dir %s for user %s", path, userID) } defer f.Close() @@ -1391,7 +1442,7 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error { return filepath.SkipDir } - level.Error(util.Logger).Log("msg", "unable to read TSDB dir", "err", err, "user", userID, "path", path) + level.Error(i.logger).Log("msg", "unable to read TSDB dir", "err", err, "user", userID, "path", path) return errors.Wrapf(err, "unable to read TSDB dir %s for user %s", path, userID) } @@ -1414,16 +1465,16 @@ func (i *Ingester) openExistingTSDB(ctx context.Context) error { // Wait for all workers to complete. err := group.Wait() if err != nil { - level.Error(util.Logger).Log("msg", "error while opening existing TSDBs", "err", err) + level.Error(i.logger).Log("msg", "error while opening existing TSDBs", "err", err) return err } - level.Info(util.Logger).Log("msg", "successfully opened existing TSDBs") + level.Info(i.logger).Log("msg", "successfully opened existing TSDBs") return nil } -// numSeriesInTSDB returns the total number of in-memory series across all open TSDBs. -func (i *Ingester) numSeriesInTSDB() float64 { +// getMemorySeriesMetric returns the total number of in-memory series across all open TSDBs. +func (i *Ingester) getMemorySeriesMetric() float64 { i.userStatesMtx.RLock() defer i.userStatesMtx.RUnlock() @@ -1435,8 +1486,27 @@ func (i *Ingester) numSeriesInTSDB() float64 { return float64(count) } +// getOldestUnshippedBlockMetric returns the unix timestamp of the oldest unshipped block or +// 0 if all blocks have been shipped. 
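The function below reads that timestamp straight from each block's ULID, which encodes its creation time with millisecond precision, and exports it in seconds (hence the division by 1000). The conversion on its own, using the same github.com/oklog/ulid package as the vendored code:

package example

import (
	"time"

	"github.com/oklog/ulid"
)

// ulidTime converts the millisecond timestamp embedded in a ULID to time.Time.
func ulidTime(id ulid.ULID) time.Time {
	ms := int64(id.Time()) // milliseconds since the Unix epoch
	return time.Unix(ms/1000, (ms%1000)*int64(time.Millisecond))
}
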
+func (i *Ingester) getOldestUnshippedBlockMetric() float64 { + i.userStatesMtx.RLock() + defer i.userStatesMtx.RUnlock() + + oldest := uint64(0) + for _, db := range i.TSDBState.dbs { + if ts := db.getOldestUnshippedBlockTime(); oldest == 0 || ts < oldest { + oldest = ts + } + } + + return float64(oldest / 1000) +} + func (i *Ingester) shipBlocksLoop(ctx context.Context) error { - shipTicker := time.NewTicker(i.cfg.BlocksStorageConfig.TSDB.ShipInterval) + // We add a slight jitter to make sure that if the head compaction interval and ship interval are set to the same + // value they don't clash (if they both continuously run at the same exact time, the head compaction may not run + // because can't successfully change the state). + shipTicker := time.NewTicker(util.DurationWithJitter(i.cfg.BlocksStorageConfig.TSDB.ShipInterval, 0.01)) defer shipTicker.Stop() for { @@ -1466,7 +1536,7 @@ func (i *Ingester) shipBlocks(ctx context.Context) { // run the shipper in such state we could end up with race conditions. if i.lifecycler != nil { if ingesterState := i.lifecycler.GetState(); ingesterState == ring.PENDING || ingesterState == ring.JOINING { - level.Info(util.Logger).Log("msg", "TSDB blocks shipping has been skipped because of the current ingester state", "state", ingesterState) + level.Info(i.logger).Log("msg", "TSDB blocks shipping has been skipped because of the current ingester state", "state", ingesterState) return } } @@ -1492,20 +1562,39 @@ func (i *Ingester) shipBlocks(ctx context.Context) { if err != nil { // If we cannot check for deletion mark, we continue anyway, even though in production shipper will likely fail too. // This however simplifies unit tests, where tenant deletion check is enabled by default, but tests don't setup bucket. - level.Warn(util.Logger).Log("msg", "failed to check for tenant deletion mark before shipping blocks", "user", userID, "err", err) + level.Warn(i.logger).Log("msg", "failed to check for tenant deletion mark before shipping blocks", "user", userID, "err", err) } else if deletionMarkExists { userDB.deletionMarkFound.Store(true) - level.Info(util.Logger).Log("msg", "tenant deletion mark exists, not shipping blocks", "user", userID) + level.Info(i.logger).Log("msg", "tenant deletion mark exists, not shipping blocks", "user", userID) return nil } } - // Run the shipper's Sync() to upload unshipped blocks. - if uploaded, err := userDB.shipper.Sync(ctx); err != nil { - level.Warn(util.Logger).Log("msg", "shipper failed to synchronize TSDB blocks with the storage", "user", userID, "uploaded", uploaded, "err", err) + // Run the shipper's Sync() to upload unshipped blocks. Make sure the TSDB state is active, in order to + // avoid any race condition with closing idle TSDBs. 
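The guard just below is built on a small compare-and-swap state machine carried by every userTSDB (active, activeShipping, forceCompacting, closing, closed), so shipping, forced compaction and closing mutually exclude each other. A standalone sketch of that mechanism, using an illustrative type rather than the vendored one:

package example

import "sync"

type tsdbState int

const (
	active tsdbState = iota
	activeShipping
	forceCompacting
	closing
	closed
)

type stateTracker struct {
	mtx   sync.Mutex
	state tsdbState
}

// casState atomically moves from->to and reports whether the swap happened.
func (t *stateTracker) casState(from, to tsdbState) bool {
	t.mtx.Lock()
	defer t.mtx.Unlock()
	if t.state != from {
		return false
	}
	t.state = to
	return true
}

Because every transition names its expected source state, a shipper that finds the TSDB in closing simply skips the tenant instead of racing the close.
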
+ if !userDB.casState(active, activeShipping) { + level.Info(i.logger).Log("msg", "shipper skipped because the TSDB is not active", "user", userID) + return nil + } + defer userDB.casState(activeShipping, active) + + uploaded, err := userDB.shipper.Sync(ctx) + if err != nil { + level.Warn(i.logger).Log("msg", "shipper failed to synchronize TSDB blocks with the storage", "user", userID, "uploaded", uploaded, "err", err) } else { - level.Debug(util.Logger).Log("msg", "shipper successfully synchronized TSDB blocks with storage", "user", userID, "uploaded", uploaded) + level.Debug(i.logger).Log("msg", "shipper successfully synchronized TSDB blocks with storage", "user", userID, "uploaded", uploaded) + } + + // The shipper meta file could be updated even if the Sync() returned an error, + // so it's safer to update it each time at least a block has been uploaded. + // Moreover, the shipper meta file could be updated even if no blocks are uploaded + // (eg. blocks removed due to retention) but doesn't cause any harm not updating + // the cached list of blocks in such case, so we're not handling it. + if uploaded > 0 { + if err := userDB.updateCachedShippedBlocks(); err != nil { + level.Error(i.logger).Log("msg", "failed to update cached shipped blocks after shipper synchronisation", "user", userID, "err", err) + } } return nil @@ -1543,7 +1632,7 @@ func (i *Ingester) compactBlocks(ctx context.Context, force bool) { // Compaction loop is not running in LEAVING state, so if we get here in LEAVING state, we're flushing blocks. if i.lifecycler != nil { if ingesterState := i.lifecycler.GetState(); ingesterState == ring.JOINING { - level.Info(util.Logger).Log("msg", "TSDB blocks compaction has been skipped because of the current ingester state", "state", ingesterState) + level.Info(i.logger).Log("msg", "TSDB blocks compaction has been skipped because of the current ingester state", "state", ingesterState) return } } @@ -1572,7 +1661,7 @@ func (i *Ingester) compactBlocks(ctx context.Context, force bool) { case i.cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout > 0 && userDB.isIdle(time.Now(), i.cfg.BlocksStorageConfig.TSDB.HeadCompactionIdleTimeout): reason = "idle" - level.Info(util.Logger).Log("msg", "TSDB is idle, forcing compaction", "user", userID) + level.Info(i.logger).Log("msg", "TSDB is idle, forcing compaction", "user", userID) err = userDB.compactHead(i.cfg.BlocksStorageConfig.TSDB.BlockRanges[0].Milliseconds()) default: @@ -1582,9 +1671,9 @@ func (i *Ingester) compactBlocks(ctx context.Context, force bool) { if err != nil { i.TSDBState.compactionsFailed.Inc() - level.Warn(util.Logger).Log("msg", "TSDB blocks compaction for user has failed", "user", userID, "err", err, "compactReason", reason) + level.Warn(i.logger).Log("msg", "TSDB blocks compaction for user has failed", "user", userID, "err", err, "compactReason", reason) } else { - level.Debug(util.Logger).Log("msg", "TSDB blocks compaction completed successfully", "user", userID, "compactReason", reason) + level.Debug(i.logger).Log("msg", "TSDB blocks compaction completed successfully", "user", userID, "compactReason", reason) } return nil @@ -1614,12 +1703,12 @@ func (i *Ingester) closeAndDeleteUserTSDBIfIdle(userID string) tsdbCloseCheckRes if result, err := userDB.shouldCloseTSDB(i.cfg.BlocksStorageConfig.TSDB.CloseIdleTSDBTimeout); !result.shouldClose() { if err != nil { - level.Error(util.Logger).Log("msg", "cannot close idle TSDB", "user", userID, "err", err) + level.Error(i.logger).Log("msg", "cannot close idle TSDB", 
"user", userID, "err", err) } return result } - // This disables pushes and force-compactions. + // This disables pushes and force-compactions. Not allowed to close while shipping is in progress. if !userDB.casState(active, closing) { return tsdbNotActive } @@ -1634,7 +1723,7 @@ func (i *Ingester) closeAndDeleteUserTSDBIfIdle(userID string) tsdbCloseCheckRes tenantDeleted := false if result, err := userDB.shouldCloseTSDB(i.cfg.BlocksStorageConfig.TSDB.CloseIdleTSDBTimeout); !result.shouldClose() { if err != nil { - level.Error(util.Logger).Log("msg", "cannot close idle TSDB", "user", userID, "err", err) + level.Error(i.logger).Log("msg", "cannot close idle TSDB", "user", userID, "err", err) } return result } else if result == tsdbTenantMarkedForDeletion { @@ -1644,11 +1733,11 @@ func (i *Ingester) closeAndDeleteUserTSDBIfIdle(userID string) tsdbCloseCheckRes dir := userDB.db.Dir() if err := userDB.Close(); err != nil { - level.Error(util.Logger).Log("msg", "failed to close idle TSDB", "user", userID, "err", err) + level.Error(i.logger).Log("msg", "failed to close idle TSDB", "user", userID, "err", err) return tsdbCloseFailed } - level.Info(util.Logger).Log("msg", "closed idle TSDB", "user", userID) + level.Info(i.logger).Log("msg", "closed idle TSDB", "user", userID) // This will prevent going back to "active" state in deferred statement. userDB.casState(closing, closed) @@ -1657,20 +1746,26 @@ func (i *Ingester) closeAndDeleteUserTSDBIfIdle(userID string) tsdbCloseCheckRes delete(i.TSDBState.dbs, userID) i.userStatesMtx.Unlock() + i.metrics.memUsers.Dec() i.TSDBState.tsdbMetrics.removeRegistryForUser(userID) + i.deleteUserMetadata(userID) + i.metrics.deletePerUserMetrics(userID) + + validation.DeletePerUserValidationMetrics(userID, i.logger) + // And delete local data. if err := os.RemoveAll(dir); err != nil { - level.Error(util.Logger).Log("msg", "failed to delete local TSDB", "user", userID, "err", err) + level.Error(i.logger).Log("msg", "failed to delete local TSDB", "user", userID, "err", err) return tsdbDataRemovalFailed } if tenantDeleted { - level.Info(util.Logger).Log("msg", "deleted local TSDB, user marked for deletion", "user", userID, "dir", dir) + level.Info(i.logger).Log("msg", "deleted local TSDB, user marked for deletion", "user", userID, "dir", dir) return tsdbTenantMarkedForDeletion } - level.Info(util.Logger).Log("msg", "deleted local TSDB, due to being idle", "user", userID, "dir", dir) + level.Info(i.logger).Log("msg", "deleted local TSDB, due to being idle", "user", userID, "dir", dir) return tsdbIdleClosed } @@ -1681,16 +1776,16 @@ func (i *Ingester) closeAndDeleteUserTSDBIfIdle(userID string) tsdbCloseCheckRes // // When used from flusher, ingester is constructed in a way that compaction, shipping and receiving of samples is never started. func (i *Ingester) v2LifecyclerFlush() { - level.Info(util.Logger).Log("msg", "starting to flush and ship TSDB blocks") + level.Info(i.logger).Log("msg", "starting to flush and ship TSDB blocks") ctx := context.Background() i.compactBlocks(ctx, true) - if i.cfg.BlocksStorageConfig.TSDB.ShipInterval > 0 { + if i.cfg.BlocksStorageConfig.TSDB.IsBlocksShippingEnabled() { i.shipBlocks(ctx) } - level.Info(util.Logger).Log("msg", "finished flushing and shipping TSDB blocks") + level.Info(i.logger).Log("msg", "finished flushing and shipping TSDB blocks") } // Blocks version of Flush handler. It force-compacts blocks, and triggers shipping. 
@@ -1698,52 +1793,52 @@ func (i *Ingester) v2FlushHandler(w http.ResponseWriter, _ *http.Request) { go func() { ingCtx := i.BasicService.ServiceContext() if ingCtx == nil || ingCtx.Err() != nil { - level.Info(util.Logger).Log("msg", "flushing TSDB blocks: ingester not running, ignoring flush request") + level.Info(i.logger).Log("msg", "flushing TSDB blocks: ingester not running, ignoring flush request") return } ch := make(chan struct{}, 1) - level.Info(util.Logger).Log("msg", "flushing TSDB blocks: triggering compaction") + level.Info(i.logger).Log("msg", "flushing TSDB blocks: triggering compaction") select { case i.TSDBState.forceCompactTrigger <- ch: // Compacting now. case <-ingCtx.Done(): - level.Warn(util.Logger).Log("msg", "failed to compact TSDB blocks, ingester not running anymore") + level.Warn(i.logger).Log("msg", "failed to compact TSDB blocks, ingester not running anymore") return } // Wait until notified about compaction being finished. select { case <-ch: - level.Info(util.Logger).Log("msg", "finished compacting TSDB blocks") + level.Info(i.logger).Log("msg", "finished compacting TSDB blocks") case <-ingCtx.Done(): - level.Warn(util.Logger).Log("msg", "failed to compact TSDB blocks, ingester not running anymore") + level.Warn(i.logger).Log("msg", "failed to compact TSDB blocks, ingester not running anymore") return } - if i.cfg.BlocksStorageConfig.TSDB.ShipInterval > 0 { - level.Info(util.Logger).Log("msg", "flushing TSDB blocks: triggering shipping") + if i.cfg.BlocksStorageConfig.TSDB.IsBlocksShippingEnabled() { + level.Info(i.logger).Log("msg", "flushing TSDB blocks: triggering shipping") select { case i.TSDBState.shipTrigger <- ch: // shipping now case <-ingCtx.Done(): - level.Warn(util.Logger).Log("msg", "failed to ship TSDB blocks, ingester not running anymore") + level.Warn(i.logger).Log("msg", "failed to ship TSDB blocks, ingester not running anymore") return } // Wait until shipping finished. select { case <-ch: - level.Info(util.Logger).Log("msg", "shipping of TSDB blocks finished") + level.Info(i.logger).Log("msg", "shipping of TSDB blocks finished") case <-ingCtx.Done(): - level.Warn(util.Logger).Log("msg", "failed to ship TSDB blocks, ingester not running anymore") + level.Warn(i.logger).Log("msg", "failed to ship TSDB blocks, ingester not running anymore") return } } - level.Info(util.Logger).Log("msg", "flushing TSDB blocks: finished") + level.Info(i.logger).Log("msg", "flushing TSDB blocks: finished") }() w.WriteHeader(http.StatusNoContent) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go index fdb631eb084..0fc187e43e8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/limiter.go @@ -5,6 +5,7 @@ import ( "math" "github.com/cortexproject/cortex/pkg/util" + util_math "github.com/cortexproject/cortex/pkg/util/math" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -220,7 +221,7 @@ func (l *Limiter) convertGlobalToLocalLimit(userID string, globalLimit int) int // be written to more ingesters than it. if shardSize := l.getShardSize(userID); shardSize > 0 { // We use Min() to protect from the case the expected shard size is > available ingesters. 
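	// For the formula at the end of this function: a global limit of 300000
	// series, 10 eligible ingesters and replication factor 3 yield
	// 300000/10*3 = 90000 series enforced locally by each ingester.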
- numIngesters = util.Min(numIngesters, util.ShuffleShardExpectedInstances(shardSize, l.getNumZones())) + numIngesters = util_math.Min(numIngesters, util.ShuffleShardExpectedInstances(shardSize, l.getNumZones())) } return int((float64(globalLimit) / float64(numIngesters)) * float64(l.replicationFactor)) @@ -236,7 +237,7 @@ func (l *Limiter) getShardSize(userID string) int { func (l *Limiter) getNumZones() int { if l.zoneAwarenessEnabled { - return util.Max(l.ring.ZonesCount(), 1) + return util_math.Max(l.ring.ZonesCount(), 1) } return 1 } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go index 18977e7176e..96e4a17ff07 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/mapper.go @@ -6,11 +6,10 @@ import ( "strings" "sync" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/common/model" "go.uber.org/atomic" - - "github.com/cortexproject/cortex/pkg/util" ) const maxMappedFP = 1 << 20 // About 1M fingerprints reserved for mapping. @@ -30,14 +29,17 @@ type fpMapper struct { mappings fpMappings fpToSeries *seriesMap + + logger log.Logger } // newFPMapper loads the collision map from the persistence and // returns an fpMapper ready to use. -func newFPMapper(fpToSeries *seriesMap) *fpMapper { +func newFPMapper(fpToSeries *seriesMap, logger log.Logger) *fpMapper { return &fpMapper{ fpToSeries: fpToSeries, mappings: map[model.Fingerprint]map[string]model.Fingerprint{}, + logger: logger, } } @@ -105,7 +107,7 @@ func (m *fpMapper) maybeAddMapping( // A new mapping has to be created. mappedFP = m.nextMappedFP() mappedFPs[ms] = mappedFP - level.Debug(util.Logger).Log( + level.Debug(m.logger).Log( "msg", "fingerprint collision detected, mapping to new fingerprint", "old_fp", fp, "new_fp", mappedFP, @@ -119,7 +121,7 @@ func (m *fpMapper) maybeAddMapping( m.mtx.Lock() m.mappings[fp] = mappedFPs m.mtx.Unlock() - level.Debug(util.Logger).Log( + level.Debug(m.logger).Log( "msg", "fingerprint collision detected, mapping to new fingerprint", "old_fp", fp, "new_fp", mappedFP, diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go index 8b60c8dc0ae..38bb69b45fe 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/metrics.go @@ -216,6 +216,20 @@ func newIngesterMetrics(r prometheus.Registerer, createMetricsConflictingWithTSD return m } +func (m *ingesterMetrics) deletePerUserMetrics(userID string) { + m.memMetadataCreatedTotal.DeleteLabelValues(userID) + m.memMetadataRemovedTotal.DeleteLabelValues(userID) + m.activeSeriesPerUser.DeleteLabelValues(userID) + + if m.memSeriesCreatedTotal != nil { + m.memSeriesCreatedTotal.DeleteLabelValues(userID) + } + + if m.memSeriesRemovedTotal != nil { + m.memSeriesRemovedTotal.DeleteLabelValues(userID) + } +} + // TSDB metrics collector. Each tenant has its own registry, that TSDB code uses. type tsdbMetrics struct { // Metrics aggregated from Thanos shipper. 
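deletePerUserMetrics above relies on client_golang's per-child deletion so that a closed tenant's label values stop being exported on the next scrape. The call pattern in a minimal sketch, with a hypothetical metric:

package example

import "github.com/prometheus/client_golang/prometheus"

var seriesCreated = prometheus.NewCounterVec(prometheus.CounterOpts{
	Name: "example_series_created_total", // hypothetical metric
	Help: "Series created, labelled by user.",
}, []string{"user"})

// forgetUser drops the per-user child; it is a no-op if none exists.
func forgetUser(userID string) {
	seriesCreated.DeleteLabelValues(userID)
}
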
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go index b9b87bda7e0..e905242905a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/transfer.go @@ -53,7 +53,7 @@ func (i *Ingester) fillUserStatesFromStream(userStates *userStates, stream clien // round this loop. if fromIngesterID == "" { fromIngesterID = wireSeries.FromIngesterId - level.Info(util.Logger).Log("msg", "processing TransferChunks request", "from_ingester", fromIngesterID) + level.Info(i.logger).Log("msg", "processing TransferChunks request", "from_ingester", fromIngesterID) // Before transfer, make sure 'from' ingester is in correct state to call ClaimTokensFor later err := i.checkFromIngesterIsInLeavingState(stream.Context(), fromIngesterID) @@ -90,13 +90,13 @@ func (i *Ingester) fillUserStatesFromStream(userStates *userStates, stream clien } if seriesReceived == 0 { - level.Error(util.Logger).Log("msg", "received TransferChunks request with no series", "from_ingester", fromIngesterID) + level.Error(i.logger).Log("msg", "received TransferChunks request with no series", "from_ingester", fromIngesterID) retErr = fmt.Errorf("TransferChunks: no series") return } if fromIngesterID == "" { - level.Error(util.Logger).Log("msg", "received TransferChunks request with no ID from ingester") + level.Error(i.logger).Log("msg", "received TransferChunks request with no ID from ingester") retErr = fmt.Errorf("no ingester id") return } @@ -115,7 +115,7 @@ func (i *Ingester) TransferChunks(stream client.Ingester_TransferChunksServer) e seriesReceived := 0 xfer := func() error { - userStates := newUserStates(i.limiter, i.cfg, i.metrics) + userStates := newUserStates(i.limiter, i.cfg, i.metrics, i.logger) var err error fromIngesterID, seriesReceived, err = i.fillUserStatesFromStream(userStates, stream) @@ -139,10 +139,10 @@ func (i *Ingester) TransferChunks(stream client.Ingester_TransferChunksServer) e // Close the stream last, as this is what tells the "from" ingester that // it's OK to shut down. 
if err := stream.SendAndClose(&client.TransferChunksResponse{}); err != nil { - level.Error(util.Logger).Log("msg", "Error closing TransferChunks stream", "from_ingester", fromIngesterID, "err", err) + level.Error(i.logger).Log("msg", "Error closing TransferChunks stream", "from_ingester", fromIngesterID, "err", err) return err } - level.Info(util.Logger).Log("msg", "Successfully transferred chunks", "from_ingester", fromIngesterID, "series_received", seriesReceived) + level.Info(i.logger).Log("msg", "Successfully transferred chunks", "from_ingester", fromIngesterID, "series_received", seriesReceived) return nil } @@ -186,12 +186,12 @@ func (i *Ingester) transfer(ctx context.Context, xfer func() error) error { return } - level.Error(util.Logger).Log("msg", "TransferChunks failed, not in ACTIVE state.", "state", state) + level.Error(i.logger).Log("msg", "TransferChunks failed, not in ACTIVE state.", "state", state) // Enter PENDING state (only valid from JOINING) if i.lifecycler.GetState() == ring.JOINING { if err := i.lifecycler.ChangeState(ctx, ring.PENDING); err != nil { - level.Error(util.Logger).Log("msg", "error rolling back failed TransferChunks", "err", err) + level.Error(i.logger).Log("msg", "error rolling back failed TransferChunks", "err", err) os.Exit(1) } } @@ -267,7 +267,7 @@ func fromWireChunks(wireChunks []client.Chunk) ([]*desc, error) { func (i *Ingester) TransferOut(ctx context.Context) error { // The blocks storage doesn't support blocks transferring. if i.cfg.BlocksStorageEnabled { - level.Info(util.Logger).Log("msg", "transfer between a LEAVING ingester and a PENDING one is not supported for the blocks storage") + level.Info(i.logger).Log("msg", "transfer between a LEAVING ingester and a PENDING one is not supported for the blocks storage") return ring.ErrTransferDisabled } @@ -287,23 +287,23 @@ func (i *Ingester) TransferOut(ctx context.Context) error { for backoff.Ongoing() { err = i.transferOut(ctx) if err == nil { - level.Info(util.Logger).Log("msg", "transfer successfully completed") + level.Info(i.logger).Log("msg", "transfer successfully completed") return nil } - level.Warn(util.Logger).Log("msg", "transfer attempt failed", "err", err, "attempt", backoff.NumRetries()+1, "max_retries", i.cfg.MaxTransferRetries) + level.Warn(i.logger).Log("msg", "transfer attempt failed", "err", err, "attempt", backoff.NumRetries()+1, "max_retries", i.cfg.MaxTransferRetries) backoff.Wait() } - level.Error(util.Logger).Log("msg", "all transfer attempts failed", "err", err) + level.Error(i.logger).Log("msg", "all transfer attempts failed", "err", err) return backoff.Err() } func (i *Ingester) transferOut(ctx context.Context) error { userStatesCopy := i.userStates.cp() if len(userStatesCopy) == 0 { - level.Info(util.Logger).Log("msg", "nothing to transfer") + level.Info(i.logger).Log("msg", "nothing to transfer") return nil } @@ -312,7 +312,7 @@ func (i *Ingester) transferOut(ctx context.Context) error { return fmt.Errorf("cannot find ingester to transfer chunks to: %w", err) } - level.Info(util.Logger).Log("msg", "sending chunks", "to_ingester", targetIngester.Addr) + level.Info(i.logger).Log("msg", "sending chunks", "to_ingester", targetIngester.Addr) c, err := i.cfg.ingesterClientFactory(targetIngester.Addr, i.clientConfig) if err != nil { return err @@ -367,12 +367,12 @@ func (i *Ingester) transferOut(ctx context.Context) error { } i.flushQueuesDone.Wait() - level.Info(util.Logger).Log("msg", "successfully sent chunks", "to_ingester", targetIngester.Addr) + 
level.Info(i.logger).Log("msg", "successfully sent chunks", "to_ingester", targetIngester.Addr) return nil } // findTargetIngester finds an ingester in PENDING state. -func (i *Ingester) findTargetIngester(ctx context.Context) (*ring.IngesterDesc, error) { +func (i *Ingester) findTargetIngester(ctx context.Context) (*ring.InstanceDesc, error) { ringDesc, err := i.lifecycler.KVStore.Get(ctx, i.lifecycler.RingKey) if err != nil { return nil, err diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go index 53e9b245882..3bbb8d83d3f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_metrics_metadata.go @@ -63,6 +63,7 @@ func (mm *userMetricsMetadata) add(metric string, metadata *client.MetricMetadat return nil } +// If deadline is zero, all metadata is purged. func (mm *userMetricsMetadata) purge(deadline time.Time) { mm.mtx.Lock() defer mm.mtx.Unlock() @@ -93,10 +94,11 @@ func (mm *userMetricsMetadata) toClientMetadata() []*client.MetricMetadata { type metricMetadataSet map[client.MetricMetadata]time.Time +// If deadline is zero time, all metrics are purged. func (mms metricMetadataSet) purge(deadline time.Time) int { var deleted int for metadata, t := range mms { - if deadline.After(t) { + if deadline.IsZero() || deadline.After(t) { delete(mms, metadata) deleted++ } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go index 9e6835d585d..b676fbe25ed 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/user_state.go @@ -7,6 +7,7 @@ import ( "sync" "time" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" @@ -31,6 +32,7 @@ type userStates struct { limiter *Limiter cfg Config metrics *ingesterMetrics + logger log.Logger } type userState struct { @@ -43,6 +45,7 @@ type userState struct { ingestedAPISamples *ewmaRate ingestedRuleSamples *ewmaRate activeSeries *ActiveSeries + logger log.Logger seriesInMetric *metricCounter @@ -61,11 +64,12 @@ const ( perMetricSeriesLimit = "per_metric_series_limit" ) -func newUserStates(limiter *Limiter, cfg Config, metrics *ingesterMetrics) *userStates { +func newUserStates(limiter *Limiter, cfg Config, metrics *ingesterMetrics, logger log.Logger) *userStates { return &userStates{ limiter: limiter, cfg: cfg, metrics: metrics, + logger: logger, } } @@ -128,6 +132,7 @@ func (us *userStates) getOrCreate(userID string) *userState { state, ok := us.get(userID) if !ok { + logger := log.With(us.logger, "user", userID) // Speculatively create a userState object and try to store it // in the map. 
Another goroutine may have got there before // us, in which case this userState will be discarded @@ -140,6 +145,7 @@ func (us *userStates) getOrCreate(userID string) *userState { ingestedAPISamples: newEWMARate(0.2, us.cfg.RateUpdatePeriod), ingestedRuleSamples: newEWMARate(0.2, us.cfg.RateUpdatePeriod), seriesInMetric: newMetricCounter(us.limiter), + logger: logger, memSeries: us.metrics.memSeries, memSeriesCreatedTotal: us.metrics.memSeriesCreatedTotal.WithLabelValues(userID), @@ -150,7 +156,7 @@ func (us *userStates) getOrCreate(userID string) *userState { activeSeries: NewActiveSeries(), activeSeriesGauge: us.metrics.activeSeriesPerUser.WithLabelValues(userID), } - state.mapper = newFPMapper(state.fpToSeries) + state.mapper = newFPMapper(state.fpToSeries, logger) stored, ok := us.states.LoadOrStore(userID, state) if !ok { us.metrics.memUsers.Inc() diff --git a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go index 8ec6b6abd9f..17d613028df 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ingester/wal.go @@ -13,6 +13,7 @@ import ( "sync" "time" + "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/gogo/protobuf/proto" "github.com/pkg/errors" @@ -26,7 +27,6 @@ import ( "github.com/prometheus/prometheus/tsdb/wal" "github.com/cortexproject/cortex/pkg/ingester/client" - "github.com/cortexproject/cortex/pkg/util" ) // WALConfig is config for the Write Ahead Log. @@ -88,6 +88,8 @@ type walWrapper struct { checkpointMtx sync.Mutex bytesPool sync.Pool + logger log.Logger + // Metrics. checkpointDeleteFail prometheus.Counter checkpointDeleteTotal prometheus.Counter @@ -100,7 +102,7 @@ type walWrapper struct { } // newWAL creates a WAL object. If the WAL is disabled, then the returned WAL is a no-op WAL. 
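newWAL below scopes every WAL metric by wrapping the registerer with a constant kind="wal" label. That client_golang helper can be sketched standalone:

package example

import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

// walScoped returns a registerer that adds kind="wal" to every metric
// registered through it.
func walScoped(reg prometheus.Registerer) prometheus.Registerer {
	return prometheus.WrapRegistererWith(prometheus.Labels{"kind": "wal"}, reg)
}

func newRecordsCounter(reg prometheus.Registerer) prometheus.Counter {
	return promauto.With(walScoped(reg)).NewCounter(prometheus.CounterOpts{
		Name: "example_wal_records_total", // hypothetical metric
		Help: "Records appended to the WAL.",
	})
}
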
-func newWAL(cfg WALConfig, userStatesFunc func() map[string]*userState, registerer prometheus.Registerer) (WAL, error) { +func newWAL(cfg WALConfig, userStatesFunc func() map[string]*userState, registerer prometheus.Registerer, logger log.Logger) (WAL, error) { if !cfg.WALEnabled { return &noopWAL{}, nil } @@ -109,7 +111,7 @@ func newWAL(cfg WALConfig, userStatesFunc func() map[string]*userState, register if registerer != nil { walRegistry = prometheus.WrapRegistererWith(prometheus.Labels{"kind": "wal"}, registerer) } - tsdbWAL, err := wal.NewSize(util.Logger, walRegistry, cfg.Dir, wal.DefaultSegmentSize/4, false) + tsdbWAL, err := wal.NewSize(logger, walRegistry, cfg.Dir, wal.DefaultSegmentSize/4, false) if err != nil { return nil, err } @@ -124,6 +126,7 @@ func newWAL(cfg WALConfig, userStatesFunc func() map[string]*userState, register return make([]byte, 0, 512) }, }, + logger: logger, } w.checkpointDeleteFail = promauto.With(registerer).NewCounter(prometheus.CounterOpts{ @@ -219,19 +222,19 @@ func (w *walWrapper) run() { select { case <-ticker.C: start := time.Now() - level.Info(util.Logger).Log("msg", "starting checkpoint") + level.Info(w.logger).Log("msg", "starting checkpoint") if err := w.performCheckpoint(false); err != nil { - level.Error(util.Logger).Log("msg", "error checkpointing series", "err", err) + level.Error(w.logger).Log("msg", "error checkpointing series", "err", err) continue } elapsed := time.Since(start) - level.Info(util.Logger).Log("msg", "checkpoint done", "time", elapsed.String()) + level.Info(w.logger).Log("msg", "checkpoint done", "time", elapsed.String()) w.checkpointDuration.Observe(elapsed.Seconds()) case <-w.quit: if w.cfg.checkpointDuringShutdown { - level.Info(util.Logger).Log("msg", "creating checkpoint before shutdown") + level.Info(w.logger).Log("msg", "creating checkpoint before shutdown") if err := w.performCheckpoint(true); err != nil { - level.Error(util.Logger).Log("msg", "error checkpointing series during shutdown", "err", err) + level.Error(w.logger).Log("msg", "error checkpointing series during shutdown", "err", err) } } return @@ -292,7 +295,7 @@ func (w *walWrapper) performCheckpoint(immediate bool) (err error) { // Checkpoint is named after the last WAL segment present so that when replaying the WAL // we can start from that particular WAL segment. checkpointDir := filepath.Join(w.wal.Dir(), fmt.Sprintf(checkpointPrefix+"%06d", lastSegment)) - level.Info(util.Logger).Log("msg", "attempting checkpoint for", "dir", checkpointDir) + level.Info(w.logger).Log("msg", "attempting checkpoint for", "dir", checkpointDir) checkpointDirTemp := checkpointDir + ".tmp" if err := os.MkdirAll(checkpointDirTemp, 0777); err != nil { @@ -389,14 +392,14 @@ func (w *walWrapper) performCheckpoint(immediate bool) (err error) { if err := w.wal.Truncate(lastCh); err != nil { // It is fine to have old WAL segments hanging around if deletion failed. // We can try again next time. - level.Error(util.Logger).Log("msg", "error deleting old WAL segments", "err", err) + level.Error(w.logger).Log("msg", "error deleting old WAL segments", "err", err) } if lastCh >= 0 { if err := w.deleteCheckpoints(lastCh); err != nil { // It is fine to have old checkpoints hanging around if deletion failed. // We can try again next time. 
- level.Error(util.Logger).Log("msg", "error deleting old checkpoint", "err", err) + level.Error(w.logger).Log("msg", "error deleting old checkpoint", "err", err) } } @@ -520,17 +523,17 @@ func recoverFromWAL(ingester *Ingester) error { params.seriesCache[i] = make(map[string]map[uint64]*memorySeries) } - level.Info(util.Logger).Log("msg", "recovering from checkpoint") + level.Info(ingester.logger).Log("msg", "recovering from checkpoint") start := time.Now() userStates, idx, err := processCheckpointWithRepair(params) if err != nil { return err } elapsed := time.Since(start) - level.Info(util.Logger).Log("msg", "recovered from checkpoint", "time", elapsed.String()) + level.Info(ingester.logger).Log("msg", "recovered from checkpoint", "time", elapsed.String()) if segExists, err := segmentsExist(params.walDir); err == nil && !segExists { - level.Info(util.Logger).Log("msg", "no segments found, skipping recover from segments") + level.Info(ingester.logger).Log("msg", "no segments found, skipping recover from segments") ingester.userStatesMtx.Lock() ingester.userStates = userStates ingester.userStatesMtx.Unlock() @@ -539,13 +542,13 @@ func recoverFromWAL(ingester *Ingester) error { return err } - level.Info(util.Logger).Log("msg", "recovering from WAL", "dir", params.walDir, "start_segment", idx) + level.Info(ingester.logger).Log("msg", "recovering from WAL", "dir", params.walDir, "start_segment", idx) start = time.Now() if err := processWALWithRepair(idx, userStates, params); err != nil { return err } elapsed = time.Since(start) - level.Info(util.Logger).Log("msg", "recovered from WAL", "time", elapsed.String()) + level.Info(ingester.logger).Log("msg", "recovered from WAL", "time", elapsed.String()) ingester.userStatesMtx.Lock() ingester.userStates = userStates @@ -554,20 +557,21 @@ func recoverFromWAL(ingester *Ingester) error { } func processCheckpointWithRepair(params walRecoveryParameters) (*userStates, int, error) { + logger := params.ingester.logger // Use a local userStates, so we don't need to worry about locking. - userStates := newUserStates(params.ingester.limiter, params.ingester.cfg, params.ingester.metrics) + userStates := newUserStates(params.ingester.limiter, params.ingester.cfg, params.ingester.metrics, params.ingester.logger) lastCheckpointDir, idx, err := lastCheckpoint(params.walDir) if err != nil { return nil, -1, err } if idx < 0 { - level.Info(util.Logger).Log("msg", "no checkpoint found") + level.Info(logger).Log("msg", "no checkpoint found") return userStates, -1, nil } - level.Info(util.Logger).Log("msg", fmt.Sprintf("recovering from %s", lastCheckpointDir)) + level.Info(logger).Log("msg", fmt.Sprintf("recovering from %s", lastCheckpointDir)) err = processCheckpoint(lastCheckpointDir, userStates, params) if err == nil { @@ -577,7 +581,7 @@ func processCheckpointWithRepair(params walRecoveryParameters) (*userStates, int // We don't call repair on checkpoint as losing even a single record is like losing the entire data of a series. // We try recovering from the older checkpoint instead. params.ingester.metrics.walCorruptionsTotal.Inc() - level.Error(util.Logger).Log("msg", "checkpoint recovery failed, deleting this checkpoint and trying to recover from old checkpoint", "err", err) + level.Error(logger).Log("msg", "checkpoint recovery failed, deleting this checkpoint and trying to recover from old checkpoint", "err", err) // Deleting this checkpoint to try the previous checkpoint. 
if err := os.RemoveAll(lastCheckpointDir); err != nil { @@ -592,14 +596,14 @@ func processCheckpointWithRepair(params walRecoveryParameters) (*userStates, int } // Creating new userStates to discard the old chunks. - userStates = newUserStates(params.ingester.limiter, params.ingester.cfg, params.ingester.metrics) + userStates = newUserStates(params.ingester.limiter, params.ingester.cfg, params.ingester.metrics, params.ingester.logger) if idx < 0 { // There was only 1 checkpoint. We don't error in this case // as for the first checkpoint entire WAL will/should be present. return userStates, -1, nil } - level.Info(util.Logger).Log("msg", fmt.Sprintf("attempting recovery from %s", lastCheckpointDir)) + level.Info(logger).Log("msg", fmt.Sprintf("attempting recovery from %s", lastCheckpointDir)) if err := processCheckpoint(lastCheckpointDir, userStates, params); err != nil { // We won't attempt the repair again even if its the old checkpoint. params.ingester.metrics.walCorruptionsTotal.Inc() @@ -775,6 +779,7 @@ type samplesWithUserID struct { } func processWALWithRepair(startSegment int, userStates *userStates, params walRecoveryParameters) error { + logger := params.ingester.logger corruptErr := processWAL(startSegment, userStates, params) if corruptErr == nil { @@ -782,18 +787,18 @@ func processWALWithRepair(startSegment int, userStates *userStates, params walRe } params.ingester.metrics.walCorruptionsTotal.Inc() - level.Error(util.Logger).Log("msg", "error in replaying from WAL", "err", corruptErr) + level.Error(logger).Log("msg", "error in replaying from WAL", "err", corruptErr) // Attempt repair. - level.Info(util.Logger).Log("msg", "attempting repair of the WAL") - w, err := wal.New(util.Logger, nil, params.walDir, true) + level.Info(logger).Log("msg", "attempting repair of the WAL") + w, err := wal.New(logger, nil, params.walDir, true) if err != nil { return err } err = w.Repair(corruptErr) if err != nil { - level.Error(util.Logger).Log("msg", "error in repairing WAL", "err", err) + level.Error(logger).Log("msg", "error in repairing WAL", "err", err) } return tsdb_errors.NewMulti(err, w.Close()).Err() @@ -826,7 +831,7 @@ func processWAL(startSegment int, userStates *userStates, params walRecoveryPara go func(input <-chan *samplesWithUserID, output chan<- *samplesWithUserID, stateCache map[string]*userState, seriesCache map[string]map[uint64]*memorySeries) { - processWALSamples(userStates, stateCache, seriesCache, input, output, errChan) + processWALSamples(userStates, stateCache, seriesCache, input, output, errChan, params.ingester.logger) wg.Done() }(inputs[i], outputs[i], params.stateCache[i], params.seriesCache[i]) } @@ -949,7 +954,7 @@ Loop: } func processWALSamples(userStates *userStates, stateCache map[string]*userState, seriesCache map[string]map[uint64]*memorySeries, - input <-chan *samplesWithUserID, output chan<- *samplesWithUserID, errChan chan error) { + input <-chan *samplesWithUserID, output chan<- *samplesWithUserID, errChan chan error, logger log.Logger) { defer close(output) sp := model.SamplePair{} @@ -970,7 +975,7 @@ func processWALSamples(userStates *userStates, stateCache map[string]*userState, // If the series was not created in recovering checkpoint or // from the labels of any records previous to this, there // is no way to get the labels for this fingerprint. 
- level.Warn(util.Logger).Log("msg", "series not found for sample during wal recovery", "userid", samples.userID, "fingerprint", model.Fingerprint(samples.samples[i].Ref).String()) + level.Warn(logger).Log("msg", "series not found for sample during wal recovery", "userid", samples.userID, "fingerprint", model.Fingerprint(samples.samples[i].Ref).String()) continue } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/parallel.go b/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/parallel.go index 989c6bdb92f..d9f309e1112 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/parallel.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/astmapper/parallel.go @@ -6,7 +6,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/prometheus/prometheus/promql/parser" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) var summableAggregates = map[parser.ItemType]struct{}{ @@ -90,7 +90,7 @@ func CanParallelize(node parser.Node) bool { return true default: - level.Error(util.Logger).Log("err", fmt.Sprintf("CanParallel: unhandled node type %T", node)) + level.Error(util_log.Logger).Log("err", fmt.Sprintf("CanParallel: unhandled node type %T", node)) return false } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_finder_bucket_index.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_finder_bucket_index.go new file mode 100644 index 00000000000..0aa6b175c35 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_finder_bucket_index.go @@ -0,0 +1,108 @@ +package querier + +import ( + "context" + "time" + + "github.com/go-kit/kit/log" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/thanos/pkg/objstore" + + "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" + "github.com/cortexproject/cortex/pkg/util/services" +) + +var ( + errBucketIndexBlocksFinderNotRunning = errors.New("bucket index blocks finder is not running") + errBucketIndexTooOld = errors.New("bucket index is too old and the last time it was updated exceeds the allowed max staleness") +) + +type BucketIndexBlocksFinderConfig struct { + IndexLoader bucketindex.LoaderConfig + MaxStalePeriod time.Duration + IgnoreDeletionMarksDelay time.Duration +} + +// BucketIndexBlocksFinder implements BlocksFinder interface and find blocks in the bucket +// looking up the bucket index. +type BucketIndexBlocksFinder struct { + services.Service + + cfg BucketIndexBlocksFinderConfig + loader *bucketindex.Loader +} + +func NewBucketIndexBlocksFinder(cfg BucketIndexBlocksFinderConfig, bkt objstore.Bucket, logger log.Logger, reg prometheus.Registerer) *BucketIndexBlocksFinder { + loader := bucketindex.NewLoader(cfg.IndexLoader, bkt, logger, reg) + + return &BucketIndexBlocksFinder{ + cfg: cfg, + loader: loader, + Service: loader, + } +} + +// GetBlocks implements BlocksFinder. +func (f *BucketIndexBlocksFinder) GetBlocks(ctx context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { + if f.State() != services.Running { + return nil, nil, errBucketIndexBlocksFinderNotRunning + } + if maxT < minT { + return nil, nil, errInvalidBlocksRange + } + + // Get the bucket index for this user. 
+ idx, err := f.loader.GetIndex(ctx, userID) + if errors.Is(err, bucketindex.ErrIndexNotFound) { + // This is a legit edge case, happening when a new tenant has not shipped blocks to the storage yet + // so the bucket index hasn't been created yet. + return nil, nil, nil + } + if err != nil { + return nil, nil, err + } + + // Ensure the bucket index is not too old. + if time.Since(idx.GetUpdatedAt()) > f.cfg.MaxStalePeriod { + return nil, nil, errBucketIndexTooOld + } + + var ( + matchingBlocks = map[ulid.ULID]*bucketindex.Block{} + matchingDeletionMarks = map[ulid.ULID]*bucketindex.BlockDeletionMark{} + ) + + // Filter blocks containing samples within the range. + for _, block := range idx.Blocks { + if !block.Within(minT, maxT) { + continue + } + + matchingBlocks[block.ID] = block + } + + for _, mark := range idx.BlockDeletionMarks { + // Filter deletion marks by matching blocks only. + if _, ok := matchingBlocks[mark.ID]; !ok { + continue + } + + // Exclude blocks marked for deletion. This is the same logic as Thanos IgnoreDeletionMarkFilter. + if time.Since(time.Unix(mark.DeletionTime, 0)).Seconds() > f.cfg.IgnoreDeletionMarksDelay.Seconds() { + delete(matchingBlocks, mark.ID) + continue + } + + matchingDeletionMarks[mark.ID] = mark + } + + // Convert matching blocks into a list. + blocks := make(bucketindex.Blocks, 0, len(matchingBlocks)) + for _, b := range matchingBlocks { + blocks = append(blocks, b) + } + + return blocks, matchingDeletionMarks, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_scanner.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_finder_bucket_scan.go similarity index 85% rename from vendor/github.com/cortexproject/cortex/pkg/querier/blocks_scanner.go rename to vendor/github.com/cortexproject/cortex/pkg/querier/blocks_finder_bucket_scan.go index 5d2fb56083f..172a64f3e25 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_scanner.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_finder_bucket_scan.go @@ -25,15 +25,16 @@ import ( "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" "github.com/cortexproject/cortex/pkg/storegateway" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) var ( - errBlocksScannerNotRunning = errors.New("blocks scanner is not running") - errInvalidBlocksRange = errors.New("invalid blocks time range") + errBucketScanBlocksFinderNotRunning = errors.New("bucket scan blocks finder is not running") + errInvalidBlocksRange = errors.New("invalid blocks time range") ) -type BlocksScannerConfig struct { +type BucketScanBlocksFinderConfig struct { ScanInterval time.Duration TenantsConcurrency int MetasConcurrency int @@ -42,10 +43,11 @@ type BlocksScannerConfig struct { IgnoreDeletionMarksDelay time.Duration } -type BlocksScanner struct { +// BucketScanBlocksFinder is a BlocksFinder implementation periodically scanning the bucket to discover blocks. 
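Both finders select blocks via Within(minT, maxT). Given the half-open block intervals called out in the pre-rename code ([MinTime, MaxTime)), the overlap test it replaces reduces to:

package example

// within reports whether a block covering [blockMinT, blockMaxT) overlaps
// the query range [minT, maxT] (query bounds inclusive).
func within(blockMinT, blockMaxT, minT, maxT int64) bool {
	return blockMinT <= maxT && minT < blockMaxT
}
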
+type BucketScanBlocksFinder struct { services.Service - cfg BlocksScannerConfig + cfg BucketScanBlocksFinderConfig logger log.Logger bucketClient objstore.Bucket fetchersMetrics *storegateway.MetadataFetcherMetrics @@ -66,8 +68,8 @@ type BlocksScanner struct { scanLastSuccess prometheus.Gauge } -func NewBlocksScanner(cfg BlocksScannerConfig, bucketClient objstore.Bucket, logger log.Logger, reg prometheus.Registerer) *BlocksScanner { - d := &BlocksScanner{ +func NewBucketScanBlocksFinder(cfg BucketScanBlocksFinderConfig, bucketClient objstore.Bucket, logger log.Logger, reg prometheus.Registerer) *BucketScanBlocksFinder { + d := &BucketScanBlocksFinder{ cfg: cfg, logger: logger, bucketClient: bucketClient, @@ -102,10 +104,10 @@ func NewBlocksScanner(cfg BlocksScannerConfig, bucketClient objstore.Bucket, log // GetBlocks returns known blocks for userID containing samples within the range minT // and maxT (milliseconds, both included). Returned blocks are sorted by MaxTime descending. -func (d *BlocksScanner) GetBlocks(_ context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { +func (d *BucketScanBlocksFinder) GetBlocks(_ context.Context, userID string, minT, maxT int64) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { // We need to ensure the initial full bucket scan succeeded. if d.State() != services.Running { - return nil, nil, errBlocksScannerNotRunning + return nil, nil, errBucketScanBlocksFinderNotRunning } if maxT < minT { return nil, nil, errInvalidBlocksRange @@ -123,8 +125,7 @@ func (d *BlocksScanner) GetBlocks(_ context.Context, userID string, minT, maxT i // to "now", we're going to find matching blocks iterating the list in reverse order. var matchingMetas bucketindex.Blocks for i := len(userMetas) - 1; i >= 0; i-- { - // NOTE: Block intervals are half-open: [MinTime, MaxTime). - if userMetas[i].MinTime <= maxT && minT < userMetas[i].MaxTime { + if userMetas[i].Within(minT, maxT) { matchingMetas = append(matchingMetas, userMetas[i]) } @@ -147,7 +148,7 @@ func (d *BlocksScanner) GetBlocks(_ context.Context, userID string, minT, maxT i return matchingMetas, matchingDeletionMarks, nil } -func (d *BlocksScanner) starting(ctx context.Context) error { +func (d *BucketScanBlocksFinder) starting(ctx context.Context) error { // Before the service is in the running state it must have successfully // complete the initial scan. if err := d.scanBucket(ctx); err != nil { @@ -158,7 +159,7 @@ func (d *BlocksScanner) starting(ctx context.Context) error { return nil } -func (d *BlocksScanner) scan(ctx context.Context) error { +func (d *BucketScanBlocksFinder) scan(ctx context.Context) error { if err := d.scanBucket(ctx); err != nil { level.Error(d.logger).Log("msg", "failed to scan bucket storage to find blocks", "err", err) } @@ -167,7 +168,7 @@ func (d *BlocksScanner) scan(ctx context.Context) error { return nil } -func (d *BlocksScanner) scanBucket(ctx context.Context) (returnErr error) { +func (d *BucketScanBlocksFinder) scanBucket(ctx context.Context) (returnErr error) { defer func(start time.Time) { d.scanDuration.Observe(time.Since(start).Seconds()) if returnErr == nil { @@ -266,7 +267,7 @@ pushJobsLoop: // scanUserBlocksWithRetries runs scanUserBlocks() retrying multiple times // in case of error. 
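The function below retries a full tenant scan with util.Backoff (1s to 30s, 3 retries). The retry shape, sketched with a generic helper instead of the vendored type:

package example

import (
	"context"
	"time"
)

// withRetries runs fn up to maxRetries times with capped exponential backoff,
// returning nil on the first success or the last error otherwise.
func withRetries(ctx context.Context, maxRetries int, minDelay, maxDelay time.Duration, fn func() error) error {
	var err error
	delay := minDelay
	for attempt := 0; attempt < maxRetries; attempt++ {
		if err = fn(); err == nil {
			return nil
		}
		select {
		case <-time.After(delay):
		case <-ctx.Done():
			return ctx.Err()
		}
		if delay *= 2; delay > maxDelay {
			delay = maxDelay
		}
	}
	return err
}
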
-func (d *BlocksScanner) scanUserBlocksWithRetries(ctx context.Context, userID string) (metas bucketindex.Blocks, deletionMarks map[ulid.ULID]*bucketindex.BlockDeletionMark, err error) { +func (d *BucketScanBlocksFinder) scanUserBlocksWithRetries(ctx context.Context, userID string) (metas bucketindex.Blocks, deletionMarks map[ulid.ULID]*bucketindex.BlockDeletionMark, err error) { retries := util.NewBackoff(ctx, util.BackoffConfig{ MinBackoff: time.Second, MaxBackoff: 30 * time.Second, @@ -285,7 +286,7 @@ func (d *BlocksScanner) scanUserBlocksWithRetries(ctx context.Context, userID st return } -func (d *BlocksScanner) scanUserBlocks(ctx context.Context, userID string) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { +func (d *BucketScanBlocksFinder) scanUserBlocks(ctx context.Context, userID string) (bucketindex.Blocks, map[ulid.ULID]*bucketindex.BlockDeletionMark, error) { fetcher, userBucket, deletionMarkFilter, err := d.getOrCreateMetaFetcher(userID) if err != nil { return nil, nil, errors.Wrapf(err, "create meta fetcher for user %s", userID) @@ -327,9 +328,9 @@ func (d *BlocksScanner) scanUserBlocks(ctx context.Context, userID string) (buck } // The blocks scanner expects all blocks to be sorted by max time. - sortBlockMetasByMaxTime(res) + sortBlocksByMaxTime(res) - // Convert deletion marks to our onw data type. + // Convert deletion marks to our own data type. marks := map[ulid.ULID]*bucketindex.BlockDeletionMark{} for id, m := range deletionMarkFilter.DeletionMarkBlocks() { marks[id] = bucketindex.BlockDeletionMarkFromThanosMarker(m) @@ -338,7 +339,7 @@ func (d *BlocksScanner) scanUserBlocks(ctx context.Context, userID string) (buck return res, marks, nil } -func (d *BlocksScanner) getOrCreateMetaFetcher(userID string) (block.MetadataFetcher, objstore.Bucket, *block.IgnoreDeletionMarkFilter, error) { +func (d *BucketScanBlocksFinder) getOrCreateMetaFetcher(userID string) (block.MetadataFetcher, objstore.Bucket, *block.IgnoreDeletionMarkFilter, error) { d.fetchersMx.Lock() defer d.fetchersMx.Unlock() @@ -360,8 +361,8 @@ func (d *BlocksScanner) getOrCreateMetaFetcher(userID string) (block.MetadataFet return fetcher, userBucket, deletionMarkFilter, nil } -func (d *BlocksScanner) createMetaFetcher(userID string) (block.MetadataFetcher, objstore.Bucket, *block.IgnoreDeletionMarkFilter, error) { - userLogger := util.WithUserID(userID, d.logger) +func (d *BucketScanBlocksFinder) createMetaFetcher(userID string) (block.MetadataFetcher, objstore.Bucket, *block.IgnoreDeletionMarkFilter, error) { + userLogger := util_log.WithUserID(userID, d.logger) userBucket := bucket.NewUserBucketClient(userID, d.bucketClient) userReg := prometheus.NewRegistry() @@ -392,7 +393,7 @@ func (d *BlocksScanner) createMetaFetcher(userID string) (block.MetadataFetcher, return f, userBucket, deletionMarkFilter, nil } -func (d *BlocksScanner) getBlockMeta(userID string, blockID ulid.ULID) *bucketindex.Block { +func (d *BucketScanBlocksFinder) getBlockMeta(userID string, blockID ulid.ULID) *bucketindex.Block { d.userMx.RLock() defer d.userMx.RUnlock() @@ -404,7 +405,7 @@ func (d *BlocksScanner) getBlockMeta(userID string, blockID ulid.ULID) *bucketin return metas[blockID] } -func sortBlockMetasByMaxTime(blocks bucketindex.Blocks) { +func sortBlocksByMaxTime(blocks bucketindex.Blocks) { sort.Slice(blocks, func(i, j int) bool { return blocks[i].MaxTime < blocks[j].MaxTime }) diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go 
b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go index 4221c3a7044..8c94ad4b64f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_balanced_set.go @@ -17,8 +17,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring/client" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" - "github.com/cortexproject/cortex/pkg/util/tls" ) // BlocksStoreSet implementation used when the blocks are not sharded in the store-gateway @@ -31,7 +31,7 @@ type blocksStoreBalancedSet struct { dnsProvider *dns.Provider } -func newBlocksStoreBalancedSet(serviceAddresses []string, tlsCfg tls.ClientConfig, logger log.Logger, reg prometheus.Registerer) *blocksStoreBalancedSet { +func newBlocksStoreBalancedSet(serviceAddresses []string, clientConfig ClientConfig, logger log.Logger, reg prometheus.Registerer) *blocksStoreBalancedSet { const dnsResolveInterval = 10 * time.Second dnsProviderReg := extprom.WrapRegistererWithPrefix("cortex_storegateway_client_", reg) @@ -39,7 +39,7 @@ func newBlocksStoreBalancedSet(serviceAddresses []string, tlsCfg tls.ClientConfi s := &blocksStoreBalancedSet{ serviceAddresses: serviceAddresses, dnsProvider: dns.NewProvider(logger, dnsProviderReg, dns.GolangResolverType), - clientsPool: newStoreGatewayClientPool(nil, tlsCfg, logger, reg), + clientsPool: newStoreGatewayClientPool(nil, clientConfig, logger, reg), } s.Service = services.NewTimerService(dnsResolveInterval, s.starting, s.resolve, nil) @@ -53,7 +53,7 @@ func (s *blocksStoreBalancedSet) starting(ctx context.Context) error { func (s *blocksStoreBalancedSet) resolve(ctx context.Context) error { if err := s.dnsProvider.Resolve(ctx, s.serviceAddresses); err != nil { - level.Error(util.Logger).Log("msg", "failed to resolve store-gateway addresses", "err", err, "addresses", s.serviceAddresses) + level.Error(util_log.Logger).Log("msg", "failed to resolve store-gateway addresses", "err", err, "addresses", s.serviceAddresses) } return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go index f60ec718227..6bc049fe733 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_queryable.go @@ -37,6 +37,8 @@ import ( "github.com/cortexproject/cortex/pkg/storegateway/storegatewaypb" "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" + "github.com/cortexproject/cortex/pkg/util/math" "github.com/cortexproject/cortex/pkg/util/services" "github.com/cortexproject/cortex/pkg/util/spanlogger" ) @@ -159,20 +161,35 @@ func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegatewa return nil, errors.Wrap(err, "failed to create bucket client") } - // Blocks scanner doesn't use chunks, but we pass config for consistency. + // Blocks finder doesn't use chunks, but we pass config for consistency. 
cachingBucket, err := cortex_tsdb.CreateCachingBucket(storageCfg.BucketStore.ChunksCache, storageCfg.BucketStore.MetadataCache, bucketClient, logger, extprom.WrapRegistererWith(prometheus.Labels{"component": "querier"}, reg)) if err != nil { return nil, errors.Wrap(err, "create caching bucket") } bucketClient = cachingBucket - scanner := NewBlocksScanner(BlocksScannerConfig{ - ScanInterval: storageCfg.BucketStore.SyncInterval, - TenantsConcurrency: storageCfg.BucketStore.TenantSyncConcurrency, - MetasConcurrency: storageCfg.BucketStore.MetaSyncConcurrency, - CacheDir: storageCfg.BucketStore.SyncDir, - IgnoreDeletionMarksDelay: storageCfg.BucketStore.IgnoreDeletionMarksDelay, - }, bucketClient, logger, reg) + // Create the blocks finder. + var finder BlocksFinder + if storageCfg.BucketStore.BucketIndex.Enabled { + finder = NewBucketIndexBlocksFinder(BucketIndexBlocksFinderConfig{ + IndexLoader: bucketindex.LoaderConfig{ + CheckInterval: time.Minute, + UpdateOnStaleInterval: storageCfg.BucketStore.SyncInterval, + UpdateOnErrorInterval: storageCfg.BucketStore.BucketIndex.UpdateOnErrorInterval, + IdleTimeout: storageCfg.BucketStore.BucketIndex.IdleTimeout, + }, + MaxStalePeriod: storageCfg.BucketStore.BucketIndex.MaxStalePeriod, + IgnoreDeletionMarksDelay: storageCfg.BucketStore.IgnoreDeletionMarksDelay, + }, bucketClient, logger, reg) + } else { + finder = NewBucketScanBlocksFinder(BucketScanBlocksFinderConfig{ + ScanInterval: storageCfg.BucketStore.SyncInterval, + TenantsConcurrency: storageCfg.BucketStore.TenantSyncConcurrency, + MetasConcurrency: storageCfg.BucketStore.MetaSyncConcurrency, + CacheDir: storageCfg.BucketStore.SyncDir, + IgnoreDeletionMarksDelay: storageCfg.BucketStore.IgnoreDeletionMarksDelay, + }, bucketClient, logger, reg) + } if gatewayCfg.ShardingEnabled { storesRingCfg := gatewayCfg.ShardingRing.ToRingConfig() @@ -185,7 +202,7 @@ func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegatewa return nil, errors.Wrap(err, "failed to create store-gateway ring backend") } - storesRing, err := ring.NewWithStoreClientAndStrategy(storesRingCfg, storegateway.RingNameForClient, storegateway.RingKey, storesRingBackend, &storegateway.BlocksReplicationStrategy{}) + storesRing, err := ring.NewWithStoreClientAndStrategy(storesRingCfg, storegateway.RingNameForClient, storegateway.RingKey, storesRingBackend, ring.NewIgnoreUnhealthyInstancesReplicationStrategy()) if err != nil { return nil, errors.Wrap(err, "failed to create store-gateway ring client") } @@ -218,7 +235,7 @@ func NewBlocksStoreQueryableFromConfig(querierCfg Config, gatewayCfg storegatewa reg, ) - return NewBlocksStoreQueryable(stores, scanner, consistency, limits, querierCfg.QueryStoreAfter, logger, reg) + return NewBlocksStoreQueryable(stores, finder, consistency, limits, querierCfg.QueryStoreAfter, logger, reg) } func (q *BlocksStoreQueryable) starting(ctx context.Context) error { @@ -432,7 +449,7 @@ func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logg if q.queryStoreAfter > 0 { now := time.Now() origMaxT := maxT - maxT = util.Min64(maxT, util.TimeToMillis(now.Add(-q.queryStoreAfter))) + maxT = math.Min64(maxT, util.TimeToMillis(now.Add(-q.queryStoreAfter))) if origMaxT != maxT { level.Debug(logger).Log("msg", "the max time of the query to blocks storage has been manipulated", "original", origMaxT, "updated", maxT) @@ -519,7 +536,7 @@ func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logg } // We've not been able to query all expected blocks after 
all retries. - level.Warn(util.WithContext(ctx, logger)).Log("msg", "failed consistency check", "err", err) + level.Warn(util_log.WithContext(ctx, logger)).Log("msg", "failed consistency check", "err", err) return fmt.Errorf("consistency check failed because some blocks were not queried: %s", strings.Join(convertULIDsToString(remainingBlocks), " ")) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_replicated_set.go b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_replicated_set.go index 0730d393fc8..bf6f2f847ce 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_replicated_set.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/blocks_store_replicated_set.go @@ -15,7 +15,6 @@ import ( "github.com/cortexproject/cortex/pkg/storegateway" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" - "github.com/cortexproject/cortex/pkg/util/tls" ) // BlocksStoreSet implementation used when the blocks are sharded and replicated across @@ -37,13 +36,13 @@ func newBlocksStoreReplicationSet( storesRing *ring.Ring, shardingStrategy string, limits BlocksStoreLimits, - tlsCfg tls.ClientConfig, + clientConfig ClientConfig, logger log.Logger, reg prometheus.Registerer, ) (*blocksStoreReplicationSet, error) { s := &blocksStoreReplicationSet{ storesRing: storesRing, - clientsPool: newStoreGatewayClientPool(client.NewRingServiceDiscovery(storesRing), tlsCfg, logger, reg), + clientsPool: newStoreGatewayClientPool(client.NewRingServiceDiscovery(storesRing), clientConfig, logger, reg), shardingStrategy: shardingStrategy, limits: limits, } @@ -98,12 +97,11 @@ func (s *blocksStoreReplicationSet) GetClientsFor(userID string, blockIDs []ulid // Find the replication set of each block we need to query. for _, blockID := range blockIDs { - // Buffer internally used by the ring (give extra room for a JOINING + LEAVING instance). // Do not reuse the same buffer across multiple Get() calls because we do retain the // returned replication set. - buf := make([]ring.IngesterDesc, 0, userRing.ReplicationFactor()+2) + bufDescs, bufHosts, bufZones := ring.MakeBuffersForGet() - set, err := userRing.Get(cortex_tsdb.HashBlockID(blockID), ring.BlocksRead, buf) + set, err := userRing.Get(cortex_tsdb.HashBlockID(blockID), storegateway.BlocksRead, bufDescs, bufHosts, bufZones) if err != nil { return nil, errors.Wrapf(err, "failed to get store-gateway replication set owning the block %s", blockID.String()) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go index 2c4800e68cf..b612818bafb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/distributor_queryable.go @@ -17,6 +17,7 @@ import ( "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/chunkcompat" + "github.com/cortexproject/cortex/pkg/util/math" "github.com/cortexproject/cortex/pkg/util/spanlogger" ) @@ -103,7 +104,7 @@ func (q *distributorQuerier) Select(_ bool, sp *storage.SelectHints, matchers .. 
if q.queryIngestersWithin > 0 { now := time.Now() origMinT := minT - minT = util.Max64(minT, util.TimeToMillis(now.Add(-q.queryIngestersWithin))) + minT = math.Max64(minT, util.TimeToMillis(now.Add(-q.queryIngestersWithin))) if origMinT != minT { level.Debug(log).Log("msg", "the min time of the query to ingesters has been manipulated", "original", origMinT, "updated", minT) @@ -116,7 +117,7 @@ func (q *distributorQuerier) Select(_ bool, sp *storage.SelectHints, matchers .. } if q.streaming { - return q.streamingSelect(minT, maxT, matchers) + return q.streamingSelect(ctx, minT, maxT, matchers) } matrix, err := q.distributor.Query(ctx, model.Time(minT), model.Time(maxT), matchers...) @@ -128,13 +129,13 @@ func (q *distributorQuerier) Select(_ bool, sp *storage.SelectHints, matchers .. return series.MatrixToSeriesSet(matrix) } -func (q *distributorQuerier) streamingSelect(minT, maxT int64, matchers []*labels.Matcher) storage.SeriesSet { - userID, err := tenant.TenantID(q.ctx) +func (q *distributorQuerier) streamingSelect(ctx context.Context, minT, maxT int64, matchers []*labels.Matcher) storage.SeriesSet { + userID, err := tenant.TenantID(ctx) if err != nil { return storage.ErrSeriesSet(err) } - results, err := q.distributor.QueryStream(q.ctx, model.Time(minT), model.Time(maxT), matchers...) + results, err := q.distributor.QueryStream(ctx, model.Time(minT), model.Time(maxT), matchers...) if err != nil { return storage.ErrSeriesSet(err) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go index f24051489c6..4599df2a5ae 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/querier.go @@ -28,8 +28,8 @@ import ( "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/spanlogger" - "github.com/cortexproject/cortex/pkg/util/tls" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -43,6 +43,7 @@ type Config struct { MaxSamples int `yaml:"max_samples"` QueryIngestersWithin time.Duration `yaml:"query_ingesters_within"` QueryStoreForLabels bool `yaml:"query_store_for_labels_enabled"` + AtModifierEnabled bool `yaml:"at_modifier_enabled"` // QueryStoreAfter the time after which queries should also be sent to the store and not just ingesters. QueryStoreAfter time.Duration `yaml:"query_store_after"` @@ -63,8 +64,8 @@ type Config struct { LookbackDelta time.Duration `yaml:"lookback_delta"` // Blocks storage only. - StoreGatewayAddresses string `yaml:"store_gateway_addresses"` - StoreGatewayClient tls.ClientConfig `yaml:"store_gateway_client"` + StoreGatewayAddresses string `yaml:"store_gateway_addresses"` + StoreGatewayClient ClientConfig `yaml:"store_gateway_client"` SecondStoreEngine string `yaml:"second_store_engine"` UseSecondStoreBeforeTime flagext.Time `yaml:"use_second_store_before_time"` @@ -89,9 +90,10 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.MaxSamples, "querier.max-samples", 50e6, "Maximum number of samples a single query can load into memory.") f.DurationVar(&cfg.QueryIngestersWithin, "querier.query-ingesters-within", 0, "Maximum lookback beyond which queries are not sent to ingester. 
0 means all queries are sent to ingester.") f.BoolVar(&cfg.QueryStoreForLabels, "querier.query-store-for-labels-enabled", false, "Query long-term store for series, label values and label names APIs. Works only with blocks engine.") + f.BoolVar(&cfg.AtModifierEnabled, "querier.at-modifier-enabled", false, "Enable the @ modifier in PromQL.") f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 0 to disable.") f.DurationVar(&cfg.DefaultEvaluationInterval, "querier.default-evaluation-interval", time.Minute, "The default evaluation interval or step size for subqueries.") - f.DurationVar(&cfg.QueryStoreAfter, "querier.query-store-after", 0, "The time after which a metric should only be queried from storage and not just ingesters. 0 means all queries are sent to store. When running the blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") + f.DurationVar(&cfg.QueryStoreAfter, "querier.query-store-after", 0, "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. When running the blocks storage, if this option is enabled, the time range of the query sent to the store will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") f.StringVar(&cfg.ActiveQueryTrackerDir, "querier.active-query-tracker-dir", "./active-query-tracker", "Active query tracker monitors active queries, and writes them to the file in given directory. If Cortex discovers any queries in this log during startup, it will log them to the log file. Setting to empty value disables active query tracker, which also disables -querier.max-concurrent option.") f.StringVar(&cfg.StoreGatewayAddresses, "querier.store-gateway-addresses", "", "Comma separated list of store-gateway addresses in DNS Service Discovery format. This option should be set when using the blocks storage and the store-gateway sharding is disabled (when enabled, the store-gateway instances form a ring and addresses are picked from the ring).") f.DurationVar(&cfg.LookbackDelta, "querier.lookback-delta", 5*time.Minute, "Time since the last sample after which a time series is considered stale and ignored by expression evaluations.") @@ -165,17 +167,24 @@ func New(cfg Config, limits *validation.Overrides, distributor Distributor, stor }) engine := promql.NewEngine(promql.EngineOpts{ - Logger: util.Logger, + Logger: util_log.Logger, Reg: reg, ActiveQueryTracker: createActiveQueryTracker(cfg), MaxSamples: cfg.MaxSamples, Timeout: cfg.Timeout, LookbackDelta: cfg.LookbackDelta, + EnableAtModifier: cfg.AtModifierEnabled, NoStepSubqueryIntervalFn: func(int64) int64 { return cfg.DefaultEvaluationInterval.Milliseconds() }, }) - return &sampleAndChunkQueryable{lazyQueryable}, engine + return NewSampleAndChunkQueryable(lazyQueryable), engine +} + +// NewSampleAndChunkQueryable creates a SampleAndChunkQueryable from a +// Queryable with a ChunkQueryable stub that errors once it gets called.
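+// A hedged usage sketch (behaviour inferred from the embedded Queryable, not
+// spelled out in this patch): Querier() delegates to the wrapped Queryable,
+// while ChunkQuerier() is the erroring stub described above, e.g.
+//   sc := NewSampleAndChunkQueryable(lazyQueryable)
+//   q, err := sc.Querier(ctx, mint, maxt) // served by lazyQueryable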
+func NewSampleAndChunkQueryable(q storage.Queryable) storage.SampleAndChunkQueryable { + return &sampleAndChunkQueryable{q} } type sampleAndChunkQueryable struct { @@ -190,7 +199,7 @@ func createActiveQueryTracker(cfg Config) *promql.ActiveQueryTracker { dir := cfg.ActiveQueryTrackerDir if dir != "" { - return promql.NewActiveQueryTracker(dir, cfg.MaxConcurrent, util.Logger) + return promql.NewActiveQueryTracker(dir, cfg.MaxConcurrent, util_log.Logger) } return nil diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go index d61bf8de444..b477c4abab3 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/limits.go @@ -52,13 +52,14 @@ func (l limitsMiddleware) Do(ctx context.Context, r Request) (Response, error) { log, ctx := spanlogger.New(ctx, "limits") defer log.Finish() - userID, err := tenant.TenantID(ctx) + tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } // Clamp the time range based on the max query lookback. - if maxQueryLookback := l.MaxQueryLookback(userID); maxQueryLookback > 0 { + + if maxQueryLookback := validation.SmallestPositiveNonZeroDurationPerTenant(tenantIDs, l.MaxQueryLookback); maxQueryLookback > 0 { minStartTime := util.TimeToMillis(time.Now().Add(-maxQueryLookback)) if r.GetEnd() < minStartTime { @@ -85,7 +86,7 @@ func (l limitsMiddleware) Do(ctx context.Context, r Request) (Response, error) { } // Enforce the max query length. - if maxQueryLength := l.MaxQueryLength(userID); maxQueryLength > 0 { + if maxQueryLength := validation.SmallestPositiveNonZeroDurationPerTenant(tenantIDs, l.MaxQueryLength); maxQueryLength > 0 { queryLen := timestamp.Time(r.GetEnd()).Sub(timestamp.Time(r.GetStart())) if queryLen > maxQueryLength { return nil, httpgrpc.Errorf(http.StatusBadRequest, validation.ErrQueryTooLong, queryLen, maxQueryLength) diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go index 6e146a98e5f..282ea4655c9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/query_range.go @@ -3,6 +3,7 @@ package queryrange import ( "bytes" "context" + "fmt" "io/ioutil" "math" "net/http" @@ -13,6 +14,7 @@ import ( "time" "github.com/gogo/protobuf/proto" + "github.com/gogo/status" jsoniter "github.com/json-iterator/go" "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" @@ -186,12 +188,12 @@ func (prometheusCodec) DecodeRequest(_ context.Context, r *http.Request) (Reques var err error result.Start, err = util.ParseTime(r.FormValue("start")) if err != nil { - return nil, err + return nil, decorateWithParamName(err, "start") } result.End, err = util.ParseTime(r.FormValue("end")) if err != nil { - return nil, err + return nil, decorateWithParamName(err, "end") } if result.End < result.Start { @@ -200,7 +202,7 @@ func (prometheusCodec) DecodeRequest(_ context.Context, r *http.Request) (Reques result.Step, err = parseDurationMs(r.FormValue("step")) if err != nil { - return nil, err + return nil, decorateWithParamName(err, "step") } if result.Step <= 0 { @@ -392,3 +394,11 @@ func encodeTime(t int64) string { func encodeDurationMs(d int64) string { return 
strconv.FormatFloat(float64(d)/float64(time.Second/time.Millisecond), 'f', -1, 64) } + +func decorateWithParamName(err error, field string) error { + errTmpl := "invalid parameter %q; %v" + if status, ok := status.FromError(err); ok { + return httpgrpc.Errorf(int(status.Code()), errTmpl, field, status.Message()) + } + return fmt.Errorf(errTmpl, field, err) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go index 9b7b0e3baea..752b3ed463f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/results_cache.go @@ -6,6 +6,7 @@ import ( "fmt" "net/http" "sort" + "strings" "time" "github.com/go-kit/kit/log" @@ -17,6 +18,7 @@ import ( "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" + "github.com/prometheus/prometheus/promql/parser" "github.com/uber/jaeger-client-go" "github.com/weaveworks/common/httpgrpc" @@ -25,6 +27,7 @@ import ( "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/spanlogger" + "github.com/cortexproject/cortex/pkg/util/validation" ) var ( @@ -36,7 +39,7 @@ var ( ) type CacheGenNumberLoader interface { - GetResultsCacheGenNumber(userID string) string + GetResultsCacheGenNumber(tenantIDs []string) string } // ResultsCacheConfig is the config for the results cache. @@ -128,6 +131,7 @@ type resultsCache struct { splitter CacheSplitter extractor Extractor + minCacheExtent int64 // discard any cache extent smaller than this merger Merger cacheGenNumberLoader CacheGenNumberLoader shouldCache ShouldCacheFn @@ -171,6 +175,7 @@ func NewResultsCacheMiddleware( limits: limits, merger: merger, extractor: extractor, + minCacheExtent: (5 * time.Minute).Milliseconds(), splitter: splitter, cacheGenNumberLoader: cacheGenNumberLoader, shouldCache: shouldCache, @@ -179,7 +184,7 @@ func NewResultsCacheMiddleware( } func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) { - userID, err := tenant.TenantID(ctx) + tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } @@ -189,16 +194,16 @@ func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) { } if s.cacheGenNumberLoader != nil { - ctx = cache.InjectCacheGenNumber(ctx, s.cacheGenNumberLoader.GetResultsCacheGenNumber(userID)) + ctx = cache.InjectCacheGenNumber(ctx, s.cacheGenNumberLoader.GetResultsCacheGenNumber(tenantIDs)) } var ( - key = s.splitter.GenerateCacheKey(userID, r) + key = s.splitter.GenerateCacheKey(tenant.JoinTenantIDs(tenantIDs), r) extents []Extent response Response ) - maxCacheFreshness := s.limits.MaxCacheFreshness(userID) + maxCacheFreshness := validation.MaxDurationPerTenant(tenantIDs, s.limits.MaxCacheFreshness) maxCacheTime := int64(model.Now().Add(-maxCacheFreshness)) if r.GetStart() > maxCacheTime { return s.next.Do(ctx, r) @@ -206,9 +211,9 @@ func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) { cached, ok := s.get(ctx, key) if ok { - response, extents, err = s.handleHit(ctx, r, cached) + response, extents, err = s.handleHit(ctx, r, cached, maxCacheTime) } else { - response, extents, err = s.handleMiss(ctx, r) + response, extents, err = s.handleMiss(ctx, r, maxCacheTime) } if err == nil && len(extents) > 0 { @@ -223,7 +228,7 @@ 
func (s resultsCache) Do(ctx context.Context, r Request) (Response, error) { } // shouldCacheResponse says whether the response should be cached or not. -func (s resultsCache) shouldCacheResponse(ctx context.Context, r Response) bool { +func (s resultsCache) shouldCacheResponse(ctx context.Context, req Request, r Response, maxCacheTime int64) bool { headerValues := getHeaderValuesWithName(r, cacheControlHeader) for _, v := range headerValues { if v == noStoreValue { @@ -232,6 +237,10 @@ func (s resultsCache) shouldCacheResponse(ctx context.Context, r Response) bool } } + if !s.isAtModifierCachable(req, maxCacheTime) { + return false + } + if s.cacheGenNumberLoader == nil { return true } @@ -254,6 +263,55 @@ func (s resultsCache) shouldCacheResponse(ctx context.Context, r Response) bool return true } +var errAtModifierAfterEnd = errors.New("at modifier after end") + +// isAtModifierCachable returns true if the @ modifier result +// is safe to cache. +func (s resultsCache) isAtModifierCachable(r Request, maxCacheTime int64) bool { + // There are 2 cases when the @ modifier is not safe to cache: + // 1. When the @ modifier points to a time beyond the maxCacheTime. + // 2. If the @ modifier time is > the query range end while being + // below maxCacheTime. In such cases, if any tenant is intentionally + // playing with old data, we could cache an empty result if we look + // beyond the query end. + query := r.GetQuery() + if !strings.Contains(query, "@") { + return true + } + expr, err := parser.ParseExpr(query) + if err != nil { + // We are being pessimistic in such cases. + level.Warn(s.logger).Log("msg", "failed to parse query, considering @ modifier as not cachable", "query", query, "err", err) + return false + } + + end := r.GetEnd() + atModCachable := true + parser.Inspect(expr, func(n parser.Node, _ []parser.Node) error { + switch e := n.(type) { + case *parser.VectorSelector: + if e.Timestamp != nil && (*e.Timestamp > end || *e.Timestamp > maxCacheTime) { + atModCachable = false + return errAtModifierAfterEnd + } + case *parser.MatrixSelector: + ts := e.VectorSelector.(*parser.VectorSelector).Timestamp + if ts != nil && (*ts > end || *ts > maxCacheTime) { + atModCachable = false + return errAtModifierAfterEnd + } + case *parser.SubqueryExpr: + if e.Timestamp != nil && (*e.Timestamp > end || *e.Timestamp > maxCacheTime) { + atModCachable = false + return errAtModifierAfterEnd + } + } + return nil + }) + + return atModCachable +} + func getHeaderValuesWithName(r Response, headerName string) (headerValues []string) { for _, hv := range r.GetHeaders() { if hv.GetName() != headerName { @@ -266,13 +324,13 @@ func getHeaderValuesWithName(r Response, headerName string) (headerValues []stri return } -func (s resultsCache) handleMiss(ctx context.Context, r Request) (Response, []Extent, error) { +func (s resultsCache) handleMiss(ctx context.Context, r Request, maxCacheTime int64) (Response, []Extent, error) { response, err := s.next.Do(ctx, r) if err != nil { return nil, nil, err } - if !s.shouldCacheResponse(ctx, response) { + if !s.shouldCacheResponse(ctx, r, response, maxCacheTime) { return response, []Extent{}, nil } @@ -287,7 +345,7 @@ func (s resultsCache) handleMiss(ctx context.Context, r Request) (Response, []Ex return response, extents, nil } -func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent) (Response, []Extent, error) { +func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent, maxCacheTime int64) (Response, []Extent, error) { var ( reqResps
[]RequestResponse err error @@ -295,7 +353,7 @@ func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent log, ctx := spanlogger.New(ctx, "handleHit") defer log.Finish() - requests, responses, err := partition(r, extents, s.extractor) + requests, responses, err := s.partition(r, extents) if err != nil { return nil, nil, err } @@ -312,7 +370,7 @@ func (s resultsCache) handleHit(ctx context.Context, r Request, extents []Extent for _, reqResp := range reqResps { responses = append(responses, reqResp.Response) - if !s.shouldCacheResponse(ctx, reqResp.Response) { + if !s.shouldCacheResponse(ctx, r, reqResp.Response, maxCacheTime) { continue } extent, err := toExtent(ctx, reqResp.Request, s.extractor.ResponseWithoutHeaders(reqResp.Response)) @@ -410,7 +468,8 @@ func toExtent(ctx context.Context, req Request, res Response) (Extent, error) { } // partition calculates the required requests to satisfy req given the cached data. -func partition(req Request, extents []Extent, extractor Extractor) ([]Request, []Response, error) { +// extents must be in order by start time. +func (s resultsCache) partition(req Request, extents []Extent) ([]Request, []Response, error) { var requests []Request var cachedResponses []Response start := req.GetStart() @@ -420,6 +479,10 @@ func partition(req Request, extents []Extent, extractor Extractor) ([]Request, [ if extent.GetEnd() < start || extent.Start > req.GetEnd() { continue } + // If this extent is tiny, discard it: more efficient to do a few larger queries + if extent.End-extent.Start < s.minCacheExtent { + continue + } // If there is a bit missing at the front, make a request for that. if start < extent.Start { @@ -431,10 +494,11 @@ func partition(req Request, extents []Extent, extractor Extractor) ([]Request, [ return nil, nil, err } // extract the overlap from the cached extent. - cachedResponses = append(cachedResponses, extractor.Extract(start, req.GetEnd(), res)) + cachedResponses = append(cachedResponses, s.extractor.Extract(start, req.GetEnd(), res)) start = extent.End } + // Lastly, make a request for any data missing at the end. 
if start < req.GetEnd() { r := req.WithStartEnd(start, req.GetEnd()) requests = append(requests, r) diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/retry.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/retry.go index 32d16b92171..28101938357 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/retry.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/retry.go @@ -9,7 +9,7 @@ import ( "github.com/prometheus/client_golang/prometheus/promauto" "github.com/weaveworks/common/httpgrpc" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) type RetryMiddlewareMetrics struct { @@ -70,7 +70,7 @@ func (r retry) Do(ctx context.Context, req Request) (Response, error) { httpResp, ok := httpgrpc.HTTPResponseFromError(err) if !ok || httpResp.Code/100 == 5 { lastErr = err - level.Error(util.WithContext(ctx, r.log)).Log("msg", "error processing request", "try", tries, "err", err) + level.Error(util_log.WithContext(ctx, r.log)).Log("msg", "error processing request", "try", tries, "err", err) continue } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go index 053268d774d..9015796d8f2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/roundtrip.go @@ -218,12 +218,12 @@ func NewTripperware( op = "query_range" } - user, err := tenant.TenantID(r.Context()) + tenantIDs, err := tenant.TenantIDs(r.Context()) // This should never happen anyways because we have auth middleware before this. if err != nil { return nil, err } - queriesPerTenant.WithLabelValues(op, user).Inc() + queriesPerTenant.WithLabelValues(op, tenant.JoinTenantIDs(tenantIDs)).Inc() if !isQueryRange { return next.RoundTrip(r) diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/util.go b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/util.go index ecbbe98f794..2b82e8b3b06 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/util.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/queryrange/util.go @@ -7,6 +7,7 @@ import ( "github.com/weaveworks/common/httpgrpc" "github.com/cortexproject/cortex/pkg/tenant" + "github.com/cortexproject/cortex/pkg/util/validation" ) // RequestResponse contains a request response and the respective request that was used. @@ -17,7 +18,7 @@ type RequestResponse struct { // DoRequests executes a list of requests in parallel. The limits parameters is used to limit parallelism per single request. 
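// With tenant-federated requests there may be several tenant IDs in the
// context; the effective parallelism is then the smallest positive per-tenant
// MaxQueryParallelism, as computed by validation.SmallestPositiveIntPerTenant below.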
func DoRequests(ctx context.Context, downstream Handler, reqs []Request, limits Limits) ([]RequestResponse, error) { - userid, err := tenant.TenantID(ctx) + tenantIDs, err := tenant.TenantIDs(ctx) if err != nil { return nil, httpgrpc.Errorf(http.StatusBadRequest, err.Error()) } @@ -36,7 +37,7 @@ func DoRequests(ctx context.Context, downstream Handler, reqs []Request, limits }() respChan, errChan := make(chan RequestResponse), make(chan error) - parallelism := limits.MaxQueryParallelism(userid) + parallelism := validation.SmallestPositiveIntPerTenant(tenantIDs, limits.MaxQueryParallelism) if parallelism > len(reqs) { parallelism = len(reqs) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/remote_read.go b/vendor/github.com/cortexproject/cortex/pkg/querier/remote_read.go index df9c3cb0d47..097aeda895c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/remote_read.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/remote_read.go @@ -8,6 +8,7 @@ import ( "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" ) // Queries are a set of matchers with time ranges - should not get into megabytes @@ -16,13 +17,11 @@ const maxRemoteReadQuerySize = 1024 * 1024 // RemoteReadHandler handles Prometheus remote read requests. func RemoteReadHandler(q storage.Queryable) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - compressionType := util.CompressionTypeFor(r.Header.Get("X-Prometheus-Remote-Read-Version")) - ctx := r.Context() var req client.ReadRequest - logger := util.WithContext(r.Context(), util.Logger) - if err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRemoteReadQuerySize, &req, compressionType); err != nil { - level.Error(logger).Log("err", err.Error()) + logger := log.WithContext(r.Context(), log.Logger) + if err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRemoteReadQuerySize, &req, util.RawSnappy); err != nil { + level.Error(logger).Log("msg", "failed to parse proto", "err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } @@ -68,7 +67,7 @@ func RemoteReadHandler(q storage.Queryable) http.Handler { return } w.Header().Add("Content-Type", "application/x-protobuf") - if err := util.SerializeProtoResponse(w, &resp, compressionType); err != nil { + if err := util.SerializeProtoResponse(w, &resp, util.RawSnappy); err != nil { level.Error(logger).Log("msg", "error sending remote read response", "err", err) } }) diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.go b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.go index 0de38e08833..05a7de53473 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/stats.go @@ -29,6 +29,13 @@ func FromContext(ctx context.Context) *Stats { return o.(*Stats) } +// IsEnabled returns whether stats tracking is enabled in the context. +func IsEnabled(ctx context.Context) bool { + // When query statistics are enabled, the stats object is already initialised + // within the context, so we can just check it. + return FromContext(ctx) != nil +} + // AddWallTime adds some time to the counter. 
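// AddWallTime is safe to call on a nil *Stats (i.e. when stats collection is
// disabled): the nil-receiver check below turns it into a no-op.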
func (s *Stats) AddWallTime(t time.Duration) { if s == nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/time_middleware.go b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/time_middleware.go index d91cae6b45e..a34697a66bb 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/stats/time_middleware.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/stats/time_middleware.go @@ -16,14 +16,15 @@ func NewWallTimeMiddleware() WallTimeMiddleware { // Wrap implements middleware.Interface. func (m WallTimeMiddleware) Wrap(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - stats := FromContext(r.Context()) - if stats == nil { + if !IsEnabled(r.Context()) { next.ServeHTTP(w, r) return } startTime := time.Now() next.ServeHTTP(w, r) + + stats := FromContext(r.Context()) stats.AddWallTime(time.Since(startTime)) }) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/store_gateway_client.go b/vendor/github.com/cortexproject/cortex/pkg/querier/store_gateway_client.go index f5f748ec6ca..e956a83ec59 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/store_gateway_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/store_gateway_client.go @@ -1,6 +1,7 @@ package querier import ( + "flag" "time" "github.com/go-kit/kit/log" @@ -16,7 +17,7 @@ import ( "github.com/cortexproject/cortex/pkg/util/tls" ) -func newStoreGatewayClientFactory(clientCfg grpcclient.Config, tlsCfg tls.ClientConfig, reg prometheus.Registerer) client.PoolFactory { +func newStoreGatewayClientFactory(clientCfg grpcclient.Config, reg prometheus.Registerer) client.PoolFactory { requestDuration := promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Namespace: "cortex", Name: "storegateway_client_request_duration_seconds", @@ -26,16 +27,16 @@ func newStoreGatewayClientFactory(clientCfg grpcclient.Config, tlsCfg tls.Client }, []string{"operation", "status_code"}) return func(addr string) (client.PoolClient, error) { - return dialStoreGatewayClient(clientCfg, tlsCfg, addr, requestDuration) + return dialStoreGatewayClient(clientCfg, addr, requestDuration) } } -func dialStoreGatewayClient(clientCfg grpcclient.Config, tlsCfg tls.ClientConfig, addr string, requestDuration *prometheus.HistogramVec) (*storeGatewayClient, error) { - opts, err := tlsCfg.GetGRPCDialOptions() +func dialStoreGatewayClient(clientCfg grpcclient.Config, addr string, requestDuration *prometheus.HistogramVec) (*storeGatewayClient, error) { + opts, err := clientCfg.DialOption(grpcclient.Instrument(requestDuration)) if err != nil { return nil, err } - opts = append(opts, clientCfg.DialOption(grpcclient.Instrument(requestDuration))...) + conn, err := grpc.Dial(addr, opts...) if err != nil { return nil, errors.Wrapf(err, "failed to dial store-gateway %s", addr) @@ -66,15 +67,17 @@ func (c *storeGatewayClient) RemoteAddress() string { return c.conn.Target() } -func newStoreGatewayClientPool(discovery client.PoolServiceDiscovery, tlsCfg tls.ClientConfig, logger log.Logger, reg prometheus.Registerer) *client.Pool { +func newStoreGatewayClientPool(discovery client.PoolServiceDiscovery, clientConfig ClientConfig, logger log.Logger, reg prometheus.Registerer) *client.Pool { // We prefer sane defaults instead of exposing further config options. 
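	// Only the TLS settings are taken from the caller-provided ClientConfig;
	// the message-size, compression and rate-limit knobs below stay hard-coded.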
clientCfg := grpcclient.Config{ MaxRecvMsgSize: 100 << 20, MaxSendMsgSize: 16 << 20, - UseGzipCompression: false, + GRPCCompression: "", RateLimit: 0, RateLimitBurst: 0, BackoffOnRatelimits: false, + TLSEnabled: clientConfig.TLSEnabled, + TLS: clientConfig.TLS, } poolCfg := client.PoolConfig{ CheckInterval: time.Minute, @@ -89,5 +92,15 @@ func newStoreGatewayClientPool(discovery client.PoolServiceDiscovery, tlsCfg tls ConstLabels: map[string]string{"client": "querier"}, }) - return client.NewPool("store-gateway", poolCfg, discovery, newStoreGatewayClientFactory(clientCfg, tlsCfg, reg), clientsCount, logger) + return client.NewPool("store-gateway", poolCfg, discovery, newStoreGatewayClientFactory(clientCfg, reg), clientsCount, logger) +} + +type ClientConfig struct { + TLSEnabled bool `yaml:"tls_enabled"` + TLS tls.ClientConfig `yaml:",inline"` +} + +func (cfg *ClientConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.BoolVar(&cfg.TLSEnabled, prefix+".tls-enabled", cfg.TLSEnabled, "Enable TLS for gRPC client connecting to store-gateway.") + cfg.TLS.RegisterFlagsWithPrefix(prefix, f) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/merge_queryable.go b/vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/merge_queryable.go new file mode 100644 index 00000000000..f28848a080d --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/merge_queryable.go @@ -0,0 +1,309 @@ +package tenantfederation + +import ( + "context" + "fmt" + "sort" + + "github.com/pkg/errors" + "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/weaveworks/common/user" + + "github.com/cortexproject/cortex/pkg/tenant" +) + +const ( + defaultTenantLabel = "__tenant_id__" + retainExistingPrefix = "original_" + originalDefaultTenantLabel = retainExistingPrefix + defaultTenantLabel +) + +// NewQueryable returns a queryable that iterates through all the tenant IDs +// that are part of the request and aggregates the results from each tenant's +// Querier by sending subsequent requests. +// The result contains a label tenantLabelName to identify the tenant ID that +// it originally resulted from. +// If the label tenantLabelName already exists, its value is overwritten +// by the tenant ID and the previous value is exposed through a new label +// prefixed with "original_". This behaviour is not implemented recursively. +func NewQueryable(upstream storage.Queryable) storage.Queryable { + return &mergeQueryable{ + upstream: upstream, + } +} + +type mergeQueryable struct { + upstream storage.Queryable } + +// Querier returns a new mergeQuerier, which aggregates results from multiple +// tenants into a single result.
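+// Note that a request carrying zero or one tenant ID bypasses merging
+// entirely and is served by the upstream Querier directly (the
+// len(tenantIDs) <= 1 case below).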
+func (m *mergeQueryable) Querier(ctx context.Context, mint int64, maxt int64) (storage.Querier, error) { + tenantIDs, err := tenant.TenantIDs(ctx) + if err != nil { + return nil, err + } + + if len(tenantIDs) <= 1 { + return m.upstream.Querier(ctx, mint, maxt) + } + + var queriers = make([]storage.Querier, len(tenantIDs)) + for pos, tenantID := range tenantIDs { + q, err := m.upstream.Querier( + user.InjectOrgID(ctx, tenantID), + mint, + maxt, + ) + if err != nil { + return nil, err + } + queriers[pos] = q + } + + return &mergeQuerier{ + queriers: queriers, + tenantIDs: tenantIDs, + }, nil +} + +// mergeQuerier aggregates the results from underlying queriers and adds a +// label tenantLabelName to identify the tenant ID that the metric resulted +// from. +// If the label tenantLabelName already exists, its value is +// overwritten by the tenant ID and the previous value is exposed through a new +// label prefixed with "original_". This behaviour is not implemented recursively. +type mergeQuerier struct { + queriers []storage.Querier + tenantIDs []string +} + +// LabelValues returns all potential values for a label name. +// It is not safe to use the strings beyond the lifetime of the querier. +// For the label "tenantLabelName" it will return all the tenant IDs available. +// For the label "original_" + tenantLabelName it will return all the values +// of the underlying queriers for tenantLabelName. +func (m *mergeQuerier) LabelValues(name string) ([]string, storage.Warnings, error) { + if name == defaultTenantLabel { + return m.tenantIDs, nil, nil + } + + // ensure the name of a retained tenant id label gets handled under the + // original label name + if name == originalDefaultTenantLabel { + name = defaultTenantLabel + } + + return m.mergeDistinctStringSlice(func(q storage.Querier) ([]string, storage.Warnings, error) { + return q.LabelValues(name) + }) +} + +// LabelNames returns all the unique label names present in the underlying +// queriers. It also adds the defaultTenantLabel and, if present in the original +// results, the originalDefaultTenantLabel. +func (m *mergeQuerier) LabelNames() ([]string, storage.Warnings, error) { + labelNames, warnings, err := m.mergeDistinctStringSlice(func(q storage.Querier) ([]string, storage.Warnings, error) { + return q.LabelNames() + }) + if err != nil { + return nil, nil, err + } + + // check if the tenant label exists in the original result + var tenantLabelExists bool + labelPos := sort.SearchStrings(labelNames, defaultTenantLabel) + if labelPos < len(labelNames) && labelNames[labelPos] == defaultTenantLabel { + tenantLabelExists = true + } + + labelToAdd := defaultTenantLabel + + // if defaultTenantLabel already exists, we need to add the + // originalDefaultTenantLabel + if tenantLabelExists { + labelToAdd = originalDefaultTenantLabel + labelPos = sort.SearchStrings(labelNames, labelToAdd) + } + + // insert label at the correct position + labelNames = append(labelNames, "") + copy(labelNames[labelPos+1:], labelNames[labelPos:]) + labelNames[labelPos] = labelToAdd + + return labelNames, warnings, nil +} + +type stringSliceFunc func(storage.Querier) ([]string, storage.Warnings, error) + +// mergeDistinctStringSlice aggregates results from stringSliceFunc calls +// on a querier. It removes duplicates and sorts the result. It doesn't require +// the output of the stringSliceFunc to be sorted, as results of LabelValues +// are not sorted.
+// +// TODO: Consider running stringSliceFunc calls concurrently +func (m *mergeQuerier) mergeDistinctStringSlice(f stringSliceFunc) ([]string, storage.Warnings, error) { + var warnings storage.Warnings + resultMap := make(map[string]struct{}) + for pos, tenantID := range m.tenantIDs { + result, resultWarnings, err := f(m.queriers[pos]) + if err != nil { + return nil, nil, err + } + for _, e := range result { + resultMap[e] = struct{}{} + } + for _, w := range resultWarnings { + warnings = append(warnings, fmt.Errorf("error querying tenant id %s: %w", tenantID, w)) + } + } + + var result = make([]string, 0, len(resultMap)) + for e := range resultMap { + result = append(result, e) + } + sort.Strings(result) + return result, warnings, nil +} + +// Close releases the resources of the Querier. +func (m *mergeQuerier) Close() error { + errs := tsdb_errors.NewMulti() + for pos, tenantID := range m.tenantIDs { + errs.Add(errors.Wrapf(m.queriers[pos].Close(), "failed to close querier for tenant id %s", tenantID)) + } + return errs.Err() +} + +// Select returns a set of series that matches the given label matchers. If the +// tenantLabelName is matched on, only the matching queriers are considered. The +// forwarded labelSelector does not contain those that operate on +// tenantLabelName. +func (m *mergeQuerier) Select(sortSeries bool, hints *storage.SelectHints, matchers ...*labels.Matcher) storage.SeriesSet { + matchedTenants, filteredMatchers := filterValuesByMatchers(defaultTenantLabel, m.tenantIDs, matchers...) + var seriesSets = make([]storage.SeriesSet, 0, len(matchedTenants)) + for pos, tenantID := range m.tenantIDs { + if _, matched := matchedTenants[tenantID]; !matched { + continue + } + seriesSets = append(seriesSets, &addLabelsSeriesSet{ + // TODO: Consider running Select calls concurrently + upstream: m.queriers[pos].Select(sortSeries, hints, filteredMatchers...), + labels: labels.Labels{ + { + Name: defaultTenantLabel, + Value: tenantID, + }, + }, + }) + } + return storage.NewMergeSeriesSet(seriesSets, storage.ChainedSeriesMerge) +} + +// filterValuesByMatchers applies matchers to the input labelName and +// labelValues. A map of matched values is returned, along with all label matchers +// not matching the labelName. +// In case a label matcher is set on a label conflicting with tenantLabelName, +// we need to rename this labelMatcher's name to its original name. This is +// used as part of Select in the mergeQueryable, to ensure only relevant +// queries are considered and the forwarded matchers do not contain matchers on +// the tenantLabelName.
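+// Hypothetical example (values invented for illustration): with labelName
+// "__tenant_id__", labelValues ["team-a", "team-b"] and matchers
+// {__tenant_id__="team-a", job="api"}, the result is
+// matchedValues={"team-a"} and unrelatedMatchers=[job="api"].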
+func filterValuesByMatchers(labelName string, labelValues []string, matchers ...*labels.Matcher) (matchedValues map[string]struct{}, unrelatedMatchers []*labels.Matcher) { + // this contains the matchers which are not related to labelName + unrelatedMatchers = make([]*labels.Matcher, 0, len(matchers)) + + // build map of values to consider for the matchers + matchedValues = make(map[string]struct{}, len(labelValues)) + for _, value := range labelValues { + matchedValues[value] = struct{}{} + } + + for _, m := range matchers { + if m.Name != labelName { + // check if it has the retained label name + if m.Name == originalDefaultTenantLabel { + // rewrite label to the original name, by copying matcher and + // replacing the label name + rewrittenM := *m + rewrittenM.Name = labelName + unrelatedMatchers = append(unrelatedMatchers, &rewrittenM) + } else { + unrelatedMatchers = append(unrelatedMatchers, m) + } + continue + } + + for value := range matchedValues { + if !m.Matches(value) { + delete(matchedValues, value) + } + } + } + + return matchedValues, unrelatedMatchers +} + +type addLabelsSeriesSet struct { + upstream storage.SeriesSet + labels labels.Labels +} + +func (m *addLabelsSeriesSet) Next() bool { + return m.upstream.Next() +} + +// At returns full series. Returned series should be iterable even after Next is called. +func (m *addLabelsSeriesSet) At() storage.Series { + return &addLabelsSeries{ + upstream: m.upstream.At(), + labels: m.labels, + } +} + +// The error that iteration has failed with. +// When an error occurs, the set cannot continue to iterate. +func (m *addLabelsSeriesSet) Err() error { + return m.upstream.Err() +} + +// A collection of warnings for the whole set. +// Warnings could be returned even if iteration has not failed with an error. +func (m *addLabelsSeriesSet) Warnings() storage.Warnings { + return m.upstream.Warnings() +} + +type addLabelsSeries struct { + upstream storage.Series + labels labels.Labels +} + +// Labels returns the complete set of labels. For series it means all labels identifying the series. +func (a *addLabelsSeries) Labels() labels.Labels { + return setLabelsRetainExisting(a.upstream.Labels(), a.labels...) +} + +// Iterator returns a new, independent iterator of the data of the series. +func (a *addLabelsSeries) Iterator() chunkenc.Iterator { + return a.upstream.Iterator() +} + +// this sets a label and preserves an existing value in a new label prefixed with +// original_. It doesn't do this recursively.
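+// Hypothetical example: src {__tenant_id__="from-series"} plus the additional
+// label {__tenant_id__="team-a"} yields
+// {__tenant_id__="team-a", original___tenant_id__="from-series"}.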
+func setLabelsRetainExisting(src labels.Labels, additionalLabels ...labels.Label) labels.Labels { + lb := labels.NewBuilder(src) + + for _, additionalL := range additionalLabels { + if oldValue := src.Get(additionalL.Name); oldValue != "" { + lb.Set( + retainExistingPrefix+additionalL.Name, + oldValue, + ) + } + lb.Set(additionalL.Name, additionalL.Value) + } + + return lb.Labels() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/tenant_federation.go b/vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/tenant_federation.go new file mode 100644 index 00000000000..af5bd7b929e --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/tenantfederation/tenant_federation.go @@ -0,0 +1,14 @@ +package tenantfederation + +import ( + "flag" +) + +type Config struct { + // Enabled switches on support for multi tenant query federation + Enabled bool `yaml:"enabled"` +} + +func (cfg *Config) RegisterFlags(f *flag.FlagSet) { + f.BoolVar(&cfg.Enabled, "tenant-federation.enabled", false, "If enabled on all Cortex services, queries can be federated across multiple tenants. The tenant IDs involved need to be specified separated by a `|` character in the `X-Scope-OrgID` header (experimental).") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/frontend_processor.go b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/frontend_processor.go index 89bd6967168..cda955d6a6c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/frontend_processor.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/frontend_processor.go @@ -26,20 +26,18 @@ var ( func newFrontendProcessor(cfg Config, handler RequestHandler, log log.Logger) processor { return &frontendProcessor{ - log: log, - handler: handler, - maxMessageSize: cfg.GRPCClientConfig.GRPC.MaxSendMsgSize, - querierID: cfg.QuerierID, - queryStatsEnabled: cfg.QueryStatsEnabled, + log: log, + handler: handler, + maxMessageSize: cfg.GRPCClientConfig.MaxSendMsgSize, + querierID: cfg.QuerierID, } } // Handles incoming queries from frontend. type frontendProcessor struct { - handler RequestHandler - maxMessageSize int - querierID string - queryStatsEnabled bool + handler RequestHandler + maxMessageSize int + querierID string log log.Logger } @@ -86,7 +84,7 @@ func (fp *frontendProcessor) process(c frontendv1pb.Frontend_ProcessClient) erro // and cancel the query. We don't actually handle queries in parallel // here, as we're running in lock step with the server - each Recv is // paired with a Send. 
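		// Note that whether query statistics are collected is now decided per
		// request by the frontend (request.StatsEnabled) rather than by local
		// querier configuration.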
- go fp.runRequest(ctx, request.HttpRequest, func(response *httpgrpc.HTTPResponse, stats *stats.Stats) error { + go fp.runRequest(ctx, request.HttpRequest, request.StatsEnabled, func(response *httpgrpc.HTTPResponse, stats *stats.Stats) error { return c.Send(&frontendv1pb.ClientToFrontend{ HttpResponse: response, Stats: stats, @@ -105,9 +103,9 @@ func (fp *frontendProcessor) process(c frontendv1pb.Frontend_ProcessClient) erro } } -func (fp *frontendProcessor) runRequest(ctx context.Context, request *httpgrpc.HTTPRequest, sendHTTPResponse func(response *httpgrpc.HTTPResponse, stats *stats.Stats) error) { +func (fp *frontendProcessor) runRequest(ctx context.Context, request *httpgrpc.HTTPRequest, statsEnabled bool, sendHTTPResponse func(response *httpgrpc.HTTPResponse, stats *stats.Stats) error) { var stats *querier_stats.Stats - if fp.queryStatsEnabled { + if statsEnabled { stats, ctx = querier_stats.ContextWithEmptyStats(ctx) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/scheduler_processor.go b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/scheduler_processor.go index c92da21f704..02ff2387864 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/scheduler_processor.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/scheduler_processor.go @@ -26,18 +26,18 @@ import ( "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/grpcclient" "github.com/cortexproject/cortex/pkg/util/grpcutil" + util_log "github.com/cortexproject/cortex/pkg/util/log" cortex_middleware "github.com/cortexproject/cortex/pkg/util/middleware" "github.com/cortexproject/cortex/pkg/util/services" ) func newSchedulerProcessor(cfg Config, handler RequestHandler, log log.Logger, reg prometheus.Registerer) (*schedulerProcessor, []services.Service) { p := &schedulerProcessor{ - log: log, - handler: handler, - maxMessageSize: cfg.GRPCClientConfig.GRPC.MaxSendMsgSize, - querierID: cfg.QuerierID, - grpcConfig: cfg.GRPCClientConfig, - queryStatsEnabled: cfg.QueryStatsEnabled, + log: log, + handler: handler, + maxMessageSize: cfg.GRPCClientConfig.MaxSendMsgSize, + querierID: cfg.QuerierID, + grpcConfig: cfg.GRPCClientConfig, frontendClientRequestDuration: promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Name: "cortex_querier_query_frontend_request_duration_seconds", @@ -63,12 +63,11 @@ func newSchedulerProcessor(cfg Config, handler RequestHandler, log log.Logger, r // Handles incoming queries from query-scheduler. type schedulerProcessor struct { - log log.Logger - handler RequestHandler - grpcConfig grpcclient.ConfigWithTLS - maxMessageSize int - querierID string - queryStatsEnabled bool + log log.Logger + handler RequestHandler + grpcConfig grpcclient.Config + maxMessageSize int + querierID string frontendPool *client.Pool frontendClientRequestDuration *prometheus.HistogramVec @@ -130,9 +129,9 @@ func (sp *schedulerProcessor) querierLoop(c schedulerpb.SchedulerForQuerier_Quer ctx = spanCtx } - logger := util.WithContext(ctx, sp.log) + logger := util_log.WithContext(ctx, sp.log) - sp.runRequest(ctx, logger, request.QueryID, request.FrontendAddress, request.HttpRequest) + sp.runRequest(ctx, logger, request.QueryID, request.FrontendAddress, request.StatsEnabled, request.HttpRequest) // Report back to scheduler that processing of the query has finished. 
if err := c.Send(&schedulerpb.QuerierToScheduler{}); err != nil { @@ -142,9 +141,9 @@ func (sp *schedulerProcessor) querierLoop(c schedulerpb.SchedulerForQuerier_Quer } } -func (sp *schedulerProcessor) runRequest(ctx context.Context, logger log.Logger, queryID uint64, frontendAddress string, request *httpgrpc.HTTPRequest) { +func (sp *schedulerProcessor) runRequest(ctx context.Context, logger log.Logger, queryID uint64, frontendAddress string, statsEnabled bool, request *httpgrpc.HTTPRequest) { var stats *querier_stats.Stats - if sp.queryStatsEnabled { + if statsEnabled { stats, ctx = querier_stats.ContextWithEmptyStats(ctx) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/worker.go b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/worker.go index ac47c11a55f..a18ec7564ed 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/querier/worker/worker.go +++ b/vendor/github.com/cortexproject/cortex/pkg/querier/worker/worker.go @@ -30,10 +30,7 @@ type Config struct { QuerierID string `yaml:"id"` - GRPCClientConfig grpcclient.ConfigWithTLS `yaml:"grpc_client_config"` - - // The following config is injected internally. - QueryStatsEnabled bool `yaml:"-"` + GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"` } func (cfg *Config) RegisterFlags(f *flag.FlagSet) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go index 102865ede1a..efe10149386 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler.go @@ -13,7 +13,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -21,7 +21,7 @@ type BasicLifecyclerDelegate interface { // OnRingInstanceRegister is called while the lifecycler is registering the // instance within the ring and should return the state and set of tokens to // use for the instance itself. - OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc IngesterDesc) (IngesterState, Tokens) + OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc InstanceDesc) (IngesterState, Tokens) // OnRingInstanceTokens is called once the instance tokens are set and are // stable within the ring (honoring the observe period, if set). @@ -34,7 +34,7 @@ type BasicLifecyclerDelegate interface { // OnRingInstanceHeartbeat is called while the instance is updating its heartbeat // in the ring. - OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *IngesterDesc) + OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *InstanceDesc) } type BasicLifecyclerConfig struct { @@ -77,7 +77,7 @@ type BasicLifecycler struct { // The current instance state. currState sync.RWMutex - currInstanceDesc *IngesterDesc + currInstanceDesc *InstanceDesc } // NewBasicLifecycler makes a new BasicLifecycler. 
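The IngesterDesc to InstanceDesc rename ripples through every BasicLifecyclerDelegate implementation. A minimal conforming delegate under the new signatures could look like the following sketch; the type name and its token-reuse policy are illustrative assumptions, and the OnRingInstanceTokens/OnRingInstanceStopping shapes are taken from the delegate calls visible in this patch, not from this hunk:

package example

import "github.com/cortexproject/cortex/pkg/ring"

// noopDelegate reuses whatever state and tokens an existing instance already
// has in the ring, and registers fresh instances as JOINING with no tokens.
type noopDelegate struct{}

func (noopDelegate) OnRingInstanceRegister(_ *ring.BasicLifecycler, _ ring.Desc, instanceExists bool, _ string, instanceDesc ring.InstanceDesc) (ring.IngesterState, ring.Tokens) {
	if instanceExists {
		// Keep the state and tokens already registered in the ring.
		return instanceDesc.GetState(), ring.Tokens(instanceDesc.GetTokens())
	}
	return ring.JOINING, ring.Tokens{}
}

func (noopDelegate) OnRingInstanceTokens(*ring.BasicLifecycler, ring.Tokens) {}

func (noopDelegate) OnRingInstanceStopping(*ring.BasicLifecycler) {}

// The heartbeat hook now receives *ring.InstanceDesc (renamed from
// IngesterDesc); the timestamp itself is updated by the lifecycler.
func (noopDelegate) OnRingInstanceHeartbeat(*ring.BasicLifecycler, *ring.Desc, *ring.InstanceDesc) {}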
@@ -194,7 +194,7 @@ func (l *BasicLifecycler) running(ctx context.Context) error { f() case <-ctx.Done(): - level.Info(util.Logger).Log("msg", "ring lifecycler is shutting down", "ring", l.ringName) + level.Info(util_log.Logger).Log("msg", "ring lifecycler is shutting down", "ring", l.ringName) return nil } } @@ -239,7 +239,7 @@ heartbeatLoop: // registerInstance registers the instance in the ring. The initial state and set of tokens // depends on the OnRingInstanceRegister() delegate function. func (l *BasicLifecycler) registerInstance(ctx context.Context) error { - var instanceDesc IngesterDesc + var instanceDesc InstanceDesc err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { ringDesc := GetOrCreateRingDesc(in) @@ -327,7 +327,7 @@ func (l *BasicLifecycler) waitStableTokens(ctx context.Context, period time.Dura func (l *BasicLifecycler) verifyTokens(ctx context.Context) bool { result := false - err := l.updateInstance(ctx, func(r *Desc, i *IngesterDesc) bool { + err := l.updateInstance(ctx, func(r *Desc, i *InstanceDesc) bool { // At this point, we should have the same tokens as we have registered before. actualTokens, takenTokens := r.TokensFor(l.cfg.ID) @@ -385,8 +385,8 @@ func (l *BasicLifecycler) unregisterInstance(ctx context.Context) error { return nil } -func (l *BasicLifecycler) updateInstance(ctx context.Context, update func(*Desc, *IngesterDesc) bool) error { - var instanceDesc IngesterDesc +func (l *BasicLifecycler) updateInstance(ctx context.Context, update func(*Desc, *InstanceDesc) bool) error { + var instanceDesc InstanceDesc err := l.store.CAS(ctx, l.ringKey, func(in interface{}) (out interface{}, retry bool, err error) { ringDesc := GetOrCreateRingDesc(in) @@ -431,7 +431,7 @@ func (l *BasicLifecycler) updateInstance(ctx context.Context, update func(*Desc, // heartbeat updates the instance timestamp within the ring. This function is guaranteed // to be called within the lifecycler main goroutine. func (l *BasicLifecycler) heartbeat(ctx context.Context) { - err := l.updateInstance(ctx, func(r *Desc, i *IngesterDesc) bool { + err := l.updateInstance(ctx, func(r *Desc, i *InstanceDesc) bool { l.delegate.OnRingInstanceHeartbeat(l, r, i) i.Timestamp = time.Now().Unix() return true @@ -448,7 +448,7 @@ func (l *BasicLifecycler) heartbeat(ctx context.Context) { // changeState of the instance within the ring. This function is guaranteed // to be called within the lifecycler main goroutine. func (l *BasicLifecycler) changeState(ctx context.Context, state IngesterState) error { - err := l.updateInstance(ctx, func(_ *Desc, i *IngesterDesc) bool { + err := l.updateInstance(ctx, func(_ *Desc, i *InstanceDesc) bool { // No-op if the state hasn't changed. 
if i.State == state { return false diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go index f45a82e7c47..7126198e150 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/basic_lifecycler_delegates.go @@ -2,6 +2,7 @@ package ring import ( "context" + "os" "time" "github.com/go-kit/kit/log" @@ -20,7 +21,7 @@ func NewLeaveOnStoppingDelegate(next BasicLifecyclerDelegate, logger log.Logger) } } -func (d *LeaveOnStoppingDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc IngesterDesc) (IngesterState, Tokens) { +func (d *LeaveOnStoppingDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc InstanceDesc) (IngesterState, Tokens) { return d.next.OnRingInstanceRegister(lifecycler, ringDesc, instanceExists, instanceID, instanceDesc) } @@ -36,7 +37,7 @@ func (d *LeaveOnStoppingDelegate) OnRingInstanceStopping(lifecycler *BasicLifecy d.next.OnRingInstanceStopping(lifecycler) } -func (d *LeaveOnStoppingDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *IngesterDesc) { +func (d *LeaveOnStoppingDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *InstanceDesc) { d.next.OnRingInstanceHeartbeat(lifecycler, ringDesc, instanceDesc) } @@ -56,7 +57,7 @@ func NewTokensPersistencyDelegate(path string, state IngesterState, next BasicLi } } -func (d *TokensPersistencyDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc IngesterDesc) (IngesterState, Tokens) { +func (d *TokensPersistencyDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc InstanceDesc) (IngesterState, Tokens) { // Skip if no path has been configured. if d.tokensPath == "" { level.Info(d.logger).Log("msg", "not loading tokens from file, tokens file path is empty") @@ -71,14 +72,17 @@ func (d *TokensPersistencyDelegate) OnRingInstanceRegister(lifecycler *BasicLife tokensFromFile, err := LoadTokensFromFile(d.tokensPath) if err != nil { - level.Error(d.logger).Log("msg", "error in getting tokens from file", "err", err) + if !os.IsNotExist(err) { + level.Error(d.logger).Log("msg", "error loading tokens from file", "err", err) + } + return d.next.OnRingInstanceRegister(lifecycler, ringDesc, instanceExists, instanceID, instanceDesc) } // Signal the next delegate that the tokens have been loaded, miming the // case the instance exist in the ring (which is OK because the lifecycler // will correctly reconcile this case too). 
- return d.next.OnRingInstanceRegister(lifecycler, ringDesc, true, lifecycler.GetInstanceID(), IngesterDesc{ + return d.next.OnRingInstanceRegister(lifecycler, ringDesc, true, lifecycler.GetInstanceID(), InstanceDesc{ Addr: lifecycler.GetInstanceAddr(), Timestamp: time.Now().Unix(), RegisteredTimestamp: lifecycler.GetRegisteredAt().Unix(), @@ -102,7 +106,7 @@ func (d *TokensPersistencyDelegate) OnRingInstanceStopping(lifecycler *BasicLife d.next.OnRingInstanceStopping(lifecycler) } -func (d *TokensPersistencyDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *IngesterDesc) { +func (d *TokensPersistencyDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *InstanceDesc) { d.next.OnRingInstanceHeartbeat(lifecycler, ringDesc, instanceDesc) } @@ -122,7 +126,7 @@ func NewAutoForgetDelegate(forgetPeriod time.Duration, next BasicLifecyclerDeleg } } -func (d *AutoForgetDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc IngesterDesc) (IngesterState, Tokens) { +func (d *AutoForgetDelegate) OnRingInstanceRegister(lifecycler *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc InstanceDesc) (IngesterState, Tokens) { return d.next.OnRingInstanceRegister(lifecycler, ringDesc, instanceExists, instanceID, instanceDesc) } @@ -134,7 +138,7 @@ func (d *AutoForgetDelegate) OnRingInstanceStopping(lifecycler *BasicLifecycler) d.next.OnRingInstanceStopping(lifecycler) } -func (d *AutoForgetDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *IngesterDesc) { +func (d *AutoForgetDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler, ringDesc *Desc, instanceDesc *InstanceDesc) { for id, instance := range ringDesc.Ingesters { lastHeartbeat := time.Unix(instance.GetTimestamp(), 0) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go index 89a24656aac..c24dc200dee 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/batch.go @@ -15,8 +15,8 @@ type batchTracker struct { err chan error } -type ingester struct { - desc IngesterDesc +type instance struct { + desc InstanceDesc itemTrackers []*itemTracker indexes []int } @@ -30,26 +30,29 @@ type itemTracker struct { // DoBatch request against a set of keys in the ring, handling replication and // failures. For example if we want to write N items where they may all -// hit different ingesters, and we want them all replicated R ways with +// hit different instances, and we want them all replicated R ways with // quorum writes, we track the relationship between batch RPCs and the items // within them. // -// Callback is passed the ingester to target, and the indexes of the keys -// to send to that ingester. +// Callback is passed the instance to target, and the indexes of the keys +// to send to that instance. // // Not implemented as a method on Ring so we can test separately. 
-func DoBatch(ctx context.Context, r ReadRing, keys []uint32, callback func(IngesterDesc, []int) error, cleanup func()) error { - if r.IngesterCount() <= 0 { - return fmt.Errorf("DoBatch: IngesterCount <= 0") +func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error { + if r.InstancesCount() <= 0 { + return fmt.Errorf("DoBatch: InstancesCount <= 0") } - expectedTrackers := len(keys) * (r.ReplicationFactor() + 1) / r.IngesterCount() + expectedTrackers := len(keys) * (r.ReplicationFactor() + 1) / r.InstancesCount() itemTrackers := make([]itemTracker, len(keys)) - ingesters := make(map[string]ingester, r.IngesterCount()) + instances := make(map[string]instance, r.InstancesCount()) - const maxExpectedReplicationSet = 5 // Typical replication factor 3, plus one for inactive plus one for luck. - var descs [maxExpectedReplicationSet]IngesterDesc + var ( + bufDescs [GetBufferSize]InstanceDesc + bufHosts [GetBufferSize]string + bufZones [GetBufferSize]string + ) for i, key := range keys { - replicationSet, err := r.Get(key, Write, descs[:0]) + replicationSet, err := r.Get(key, op, bufDescs[:0], bufHosts[:0], bufZones[:0]) if err != nil { return err } @@ -57,12 +60,12 @@ func DoBatch(ctx context.Context, r ReadRing, keys []uint32, callback func(Inges itemTrackers[i].maxFailures = replicationSet.MaxErrors for _, desc := range replicationSet.Ingesters { - curr, found := ingesters[desc.Addr] + curr, found := instances[desc.Addr] if !found { curr.itemTrackers = make([]*itemTracker, 0, expectedTrackers) curr.indexes = make([]int, 0, expectedTrackers) } - ingesters[desc.Addr] = ingester{ + instances[desc.Addr] = instance{ desc: desc, itemTrackers: append(curr.itemTrackers, &itemTrackers[i]), indexes: append(curr.indexes, i), @@ -78,9 +81,9 @@ func DoBatch(ctx context.Context, r ReadRing, keys []uint32, callback func(Inges var wg sync.WaitGroup - wg.Add(len(ingesters)) - for _, i := range ingesters { - go func(i ingester) { + wg.Add(len(instances)) + for _, i := range instances { + go func(i instance) { err := callback(i.desc, i.indexes) tracker.record(i.itemTrackers, err) wg.Done() @@ -108,7 +111,7 @@ func (b *batchTracker) record(sampleTrackers []*itemTracker, err error) { // If we succeed, decrement each sample's pending count by one. If we reach // the required number of successful puts on this sample, then decrement the // number of pending samples by one. If we successfully push all samples to - // min success ingesters, wake up the waiting rpc so it can return early. + // min success instances, wake up the waiting rpc so it can return early. // Similarly, track the number of errors, and if it exceeds maxFailures // shortcut the waiting rpc. 
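
`DoBatch` above now takes the ring `Operation` as an explicit first-class argument (it previously hard-coded `Write` in the `r.Get` call) and hands the callback an `InstanceDesc`. A sketch of what a caller looks like after the change; `pushToInstance` is a hypothetical stand-in for the real per-instance RPC:

```go
package ringutil

import (
	"context"

	"github.com/cortexproject/cortex/pkg/ring"
)

// pushToInstance is hypothetical; indexes identify which of the caller's
// items hashed to this instance.
func pushToInstance(ctx context.Context, inst ring.InstanceDesc, indexes []int) error {
	_ = inst.Addr // a real caller would dial inst.Addr and send items[indexes]
	return nil
}

func writeBatch(ctx context.Context, r ring.ReadRing, keys []uint32) error {
	// The Operation is now an explicit argument instead of an implied Write.
	return ring.DoBatch(ctx, ring.Write, r, keys,
		func(inst ring.InstanceDesc, indexes []int) error {
			return pushToInstance(ctx, inst, indexes)
		},
		func() { /* cleanup, invoked once all batches complete */ },
	)
}
```
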
// diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go b/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go index 41bc8728abb..39da1db688a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/client/pool.go @@ -14,6 +14,7 @@ import ( "google.golang.org/grpc/health/grpc_health_v1" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -158,7 +159,7 @@ func (p *Pool) removeStaleClients() { serviceAddrs, err := p.discovery() if err != nil { - level.Error(util.Logger).Log("msg", "error removing stale clients", "err", err) + level.Error(util_log.Logger).Log("msg", "error removing stale clients", "err", err) return } @@ -166,7 +167,7 @@ func (p *Pool) removeStaleClients() { if util.StringsContain(serviceAddrs, addr) { continue } - level.Info(util.Logger).Log("msg", "removing stale client", "addr", addr) + level.Info(util_log.Logger).Log("msg", "removing stale client", "addr", addr) p.RemoveClientFor(addr) } } @@ -179,7 +180,7 @@ func (p *Pool) cleanUnhealthy() { if ok { err := healthCheck(client, p.cfg.HealthCheckTimeout) if err != nil { - level.Warn(util.Logger).Log("msg", fmt.Sprintf("removing %s failing healthcheck", p.clientName), "addr", addr, "reason", err) + level.Warn(util_log.Logger).Log("msg", fmt.Sprintf("removing %s failing healthcheck", p.clientName), "addr", addr, "reason", err) p.RemoveClientFor(addr) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go b/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go index e0ab7ce64b9..1706edb2a17 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/client/ring_service_discovery.go @@ -1,12 +1,17 @@ package client import ( + "errors" + "github.com/cortexproject/cortex/pkg/ring" ) func NewRingServiceDiscovery(r ring.ReadRing) PoolServiceDiscovery { return func() ([]string, error) { replicationSet, err := r.GetAllHealthy(ring.Reporting) + if errors.Is(err, ring.ErrEmptyRing) { + return nil, nil + } if err != nil { return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/http.go b/vendor/github.com/cortexproject/cortex/pkg/ring/http.go index 4ab9bd9f961..59c3fdafcba 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/http.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/http.go @@ -12,6 +12,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" ) const pageContent = ` @@ -107,7 +108,7 @@ func (r *Ring) ServeHTTP(w http.ResponseWriter, req *http.Request) { if req.Method == http.MethodPost { ingesterID := req.FormValue("forget") if err := r.forget(req.Context(), ingesterID); err != nil { - level.Error(util.WithContext(req.Context(), util.Logger)).Log("msg", "error forgetting instance", "err", err) + level.Error(log.WithContext(req.Context(), log.Logger)).Log("msg", "error forgetting instance", "err", err) } // Implement PRG pattern to prevent double-POST and work with CSRF middleware. 
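
The `NewRingServiceDiscovery` change above downgrades an empty ring from a hard error to "no addresses yet", which keeps the client pool's `removeStaleClients` from logging spurious errors at startup. The same guard in isolation, assuming `ring.ErrEmptyRing` is the sentinel returned by `GetAllHealthy` as the hunk indicates:

```go
package ringutil

import (
	"errors"

	"github.com/cortexproject/cortex/pkg/ring"
)

// healthyAddrs returns the addresses of all healthy instances, treating an
// empty ring as "no addresses" instead of an error.
func healthyAddrs(r ring.ReadRing) ([]string, error) {
	rs, err := r.GetAllHealthy(ring.Reporting)
	if errors.Is(err, ring.ErrEmptyRing) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}

	addrs := make([]string, 0, len(rs.Ingesters))
	for _, inst := range rs.Ingesters {
		addrs = append(addrs, inst.Addr)
	}
	return addrs, nil
}
```
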
@@ -131,13 +132,14 @@ func (r *Ring) ServeHTTP(w http.ResponseWriter, req *http.Request) { } sort.Strings(ingesterIDs) + now := time.Now() ingesters := []interface{}{} - _, owned := countTokens(r.ringDesc, r.ringTokens) + _, owned := r.countTokens() for _, id := range ingesterIDs { ing := r.ringDesc.Ingesters[id] heartbeatTimestamp := time.Unix(ing.Timestamp, 0) state := ing.State.String() - if !r.IsHealthy(&ing, Reporting) { + if !r.IsHealthy(&ing, Reporting, now) { state = unhealthy } @@ -178,7 +180,7 @@ func (r *Ring) ServeHTTP(w http.ResponseWriter, req *http.Request) { ShowTokens bool `json:"-"` }{ Ingesters: ingesters, - Now: time.Now(), + Now: now, ShowTokens: tokensParam == "true", }, pageTemplate, req) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go index 9b5eeaeef1f..1c39a473ced 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/client.go @@ -2,6 +2,7 @@ package consul import ( "context" + "errors" "flag" "fmt" "math/rand" @@ -16,6 +17,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const ( @@ -142,14 +144,14 @@ func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (ou options := &consul.QueryOptions{} kvp, _, err := c.kv.Get(key, options.WithContext(ctx)) if err != nil { - level.Error(util.Logger).Log("msg", "error getting key", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error getting key", "key", key, "err", err) continue } var intermediate interface{} if kvp != nil { out, err := c.codec.Decode(kvp.Value) if err != nil { - level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error decoding key", "key", key, "err", err) continue } // If key doesn't exist, index will be 0. 
@@ -173,7 +175,7 @@ func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (ou bytes, err := c.codec.Encode(intermediate) if err != nil { - level.Error(util.Logger).Log("msg", "error serialising value", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error serialising value", "key", key, "err", err) continue } ok, _, err := c.kv.CAS(&consul.KVPair{ @@ -182,11 +184,11 @@ func (c *Client) cas(ctx context.Context, key string, f func(in interface{}) (ou ModifyIndex: index, }, writeOptions.WithContext(ctx)) if err != nil { - level.Error(util.Logger).Log("msg", "error CASing", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error CASing", "key", key, "err", err) continue } if !ok { - level.Debug(util.Logger).Log("msg", "error CASing, trying again", "key", key, "index", index) + level.Debug(util_log.Logger).Log("msg", "error CASing, trying again", "key", key, "index", index) continue } return nil @@ -209,7 +211,10 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b for backoff.Ongoing() { err := limiter.Wait(ctx) if err != nil { - level.Error(util.Logger).Log("msg", "error while rate-limiting", "key", key, "err", err) + if errors.Is(err, context.Canceled) { + break + } + level.Error(util_log.Logger).Log("msg", "error while rate-limiting", "key", key, "err", err) backoff.Wait() continue } @@ -226,7 +231,7 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b // Don't backoff if value is not found (kvp == nil). In that case, Consul still returns index value, // and next call to Get will block as expected. We handle missing value below. if err != nil { - level.Error(util.Logger).Log("msg", "error getting path", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error getting path", "key", key, "err", err) backoff.Wait() continue } @@ -239,13 +244,13 @@ func (c *Client) WatchKey(ctx context.Context, key string, f func(interface{}) b } if kvp == nil { - level.Info(util.Logger).Log("msg", "value is nil", "key", key, "index", index) + level.Info(util_log.Logger).Log("msg", "value is nil", "key", key, "index", index) continue } out, err := c.codec.Decode(kvp.Value) if err != nil { - level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error decoding key", "key", key, "err", err) continue } if !f(out) { @@ -266,7 +271,10 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, for backoff.Ongoing() { err := limiter.Wait(ctx) if err != nil { - level.Error(util.Logger).Log("msg", "error while rate-limiting", "prefix", prefix, "err", err) + if errors.Is(err, context.Canceled) { + break + } + level.Error(util_log.Logger).Log("msg", "error while rate-limiting", "prefix", prefix, "err", err) backoff.Wait() continue } @@ -282,7 +290,7 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, // kvps being nil here is not an error -- quite the opposite. Consul returns index, // which makes next query blocking, so there is no need to detect this and act on it. 
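
The `WatchKey`/`WatchPrefix` hunks above stop logging when `limiter.Wait` fails because the watch context was canceled: cancellation is the normal shutdown path, not an error worth reporting. A self-contained sketch of that guard using `golang.org/x/time/rate`; the poll function is a trivial stand-in for the Consul `Get`:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// watch polls fn under a rate limit until ctx is canceled. Cancellation is a
// clean exit; only unexpected limiter errors are surfaced.
func watch(ctx context.Context, fn func() error) error {
	limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 1)
	for {
		if err := limiter.Wait(ctx); err != nil {
			if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
				return nil // expected on shutdown: break instead of logging
			}
			return fmt.Errorf("rate limiting: %w", err)
		}
		if err := fn(); err != nil {
			fmt.Println("poll error (a real watcher would back off):", err)
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() { time.Sleep(250 * time.Millisecond); cancel() }()
	fmt.Println(watch(ctx, func() error { return nil })) // <nil> once canceled
}
```
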
if err != nil { - level.Error(util.Logger).Log("msg", "error getting path", "prefix", prefix, "err", err) + level.Error(util_log.Logger).Log("msg", "error getting path", "prefix", prefix, "err", err) backoff.Wait() continue } @@ -302,7 +310,7 @@ func (c *Client) WatchPrefix(ctx context.Context, prefix string, f func(string, out, err := c.codec.Decode(kvp.Value) if err != nil { - level.Error(util.Logger).Log("msg", "error decoding list of values for prefix:key", "prefix", prefix, "key", kvp.Key, "err", err) + level.Error(util_log.Logger).Log("msg", "error decoding list of values for prefix:key", "prefix", prefix, "key", kvp.Key, "err", err) continue } if !f(kvp.Key, out) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go index 708bea76205..5d1e4557395 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/consul/mock.go @@ -10,7 +10,7 @@ import ( consul "github.com/hashicorp/consul/api" "github.com/cortexproject/cortex/pkg/ring/kv/codec" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) type mockKV struct { @@ -78,12 +78,12 @@ func (m *mockKV) Put(p *consul.KVPair, q *consul.WriteOptions) (*consul.WriteMet m.cond.Broadcast() - level.Debug(util.Logger).Log("msg", "Put", "key", p.Key, "value", fmt.Sprintf("%.40q", p.Value), "modify_index", m.current) + level.Debug(util_log.Logger).Log("msg", "Put", "key", p.Key, "value", fmt.Sprintf("%.40q", p.Value), "modify_index", m.current) return nil, nil } func (m *mockKV) CAS(p *consul.KVPair, q *consul.WriteOptions) (bool, *consul.WriteMeta, error) { - level.Debug(util.Logger).Log("msg", "CAS", "key", p.Key, "modify_index", p.ModifyIndex, "value", fmt.Sprintf("%.40q", p.Value)) + level.Debug(util_log.Logger).Log("msg", "CAS", "key", p.Key, "modify_index", p.ModifyIndex, "value", fmt.Sprintf("%.40q", p.Value)) m.mtx.Lock() defer m.mtx.Unlock() @@ -110,14 +110,14 @@ func (m *mockKV) CAS(p *consul.KVPair, q *consul.WriteOptions) (bool, *consul.Wr } func (m *mockKV) Get(key string, q *consul.QueryOptions) (*consul.KVPair, *consul.QueryMeta, error) { - level.Debug(util.Logger).Log("msg", "Get", "key", key, "wait_index", q.WaitIndex) + level.Debug(util_log.Logger).Log("msg", "Get", "key", key, "wait_index", q.WaitIndex) m.mtx.Lock() defer m.mtx.Unlock() value := m.kvps[key] if value == nil && q.WaitIndex == 0 { - level.Debug(util.Logger).Log("msg", "Get - not found", "key", key) + level.Debug(util_log.Logger).Log("msg", "Get - not found", "key", key) return nil, &consul.QueryMeta{LastIndex: m.current}, nil } @@ -146,17 +146,17 @@ func (m *mockKV) Get(key string, q *consul.QueryOptions) (*consul.KVPair, *consu } } if time.Now().After(deadline) { - level.Debug(util.Logger).Log("msg", "Get - deadline exceeded", "key", key) + level.Debug(util_log.Logger).Log("msg", "Get - deadline exceeded", "key", key) return nil, &consul.QueryMeta{LastIndex: q.WaitIndex}, nil } } if value == nil { - level.Debug(util.Logger).Log("msg", "Get - not found", "key", key) + level.Debug(util_log.Logger).Log("msg", "Get - not found", "key", key) return nil, &consul.QueryMeta{LastIndex: m.current}, nil } - level.Debug(util.Logger).Log("msg", "Get", "key", key, "modify_index", value.ModifyIndex, "value", fmt.Sprintf("%.40q", value.Value)) + level.Debug(util_log.Logger).Log("msg", "Get", "key", key, "modify_index", value.ModifyIndex, "value", 
fmt.Sprintf("%.40q", value.Value)) return copyKVPair(value), &consul.QueryMeta{LastIndex: value.ModifyIndex}, nil } @@ -203,7 +203,7 @@ func (m *mockKV) ResetIndex() { m.current = 0 m.cond.Broadcast() - level.Debug(util.Logger).Log("msg", "Reset") + level.Debug(util_log.Logger).Log("msg", "Reset") } func (m *mockKV) ResetIndexForKey(key string) { @@ -215,7 +215,7 @@ func (m *mockKV) ResetIndexForKey(key string) { } m.cond.Broadcast() - level.Debug(util.Logger).Log("msg", "ResetIndexForKey", "key", key) + level.Debug(util_log.Logger).Log("msg", "ResetIndexForKey", "key", key) } // mockedMaxWaitTime returns the minimum duration between the input duration diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go index f63ccf81988..222fb4ee319 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/etcd/etcd.go @@ -15,18 +15,17 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" + cortex_tls "github.com/cortexproject/cortex/pkg/util/tls" ) // Config for a new etcd.Client. type Config struct { - Endpoints []string `yaml:"endpoints"` - DialTimeout time.Duration `yaml:"dial_timeout"` - MaxRetries int `yaml:"max_retries"` - EnableTLS bool `yaml:"tls_enabled"` - CertFile string `yaml:"tls_cert_path"` - KeyFile string `yaml:"tls_key_path"` - TrustedCAFile string `yaml:"tls_ca_path"` - InsecureSkipVerify bool `yaml:"tls_insecure_skip_verify"` + Endpoints []string `yaml:"endpoints"` + DialTimeout time.Duration `yaml:"dial_timeout"` + MaxRetries int `yaml:"max_retries"` + EnableTLS bool `yaml:"tls_enabled"` + TLS cortex_tls.ClientConfig `yaml:",inline"` } // Client implements ring.KVClient for etcd. 
@@ -43,10 +42,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { f.DurationVar(&cfg.DialTimeout, prefix+"etcd.dial-timeout", 10*time.Second, "The dial timeout for the etcd connection.") f.IntVar(&cfg.MaxRetries, prefix+"etcd.max-retries", 10, "The maximum number of retries to do for failed ops.") f.BoolVar(&cfg.EnableTLS, prefix+"etcd.tls-enabled", false, "Enable TLS.") - f.StringVar(&cfg.CertFile, prefix+"etcd.tls-cert-path", "", "The TLS certificate file path.") - f.StringVar(&cfg.KeyFile, prefix+"etcd.tls-key-path", "", "The TLS private key file path.") - f.StringVar(&cfg.TrustedCAFile, prefix+"etcd.tls-ca-path", "", "The trusted CA file path.") - f.BoolVar(&cfg.InsecureSkipVerify, prefix+"etcd.tls-insecure-skip-verify", false, "Skip validating server certificate.") + cfg.TLS.RegisterFlagsWithPrefix(prefix+"etcd", f) } // GetTLS sets the TLS config field with certs @@ -55,10 +51,11 @@ func (cfg *Config) GetTLS() (*tls.Config, error) { return nil, nil } tlsInfo := &transport.TLSInfo{ - CertFile: cfg.CertFile, - KeyFile: cfg.KeyFile, - TrustedCAFile: cfg.TrustedCAFile, - InsecureSkipVerify: cfg.InsecureSkipVerify, + CertFile: cfg.TLS.CertPath, + KeyFile: cfg.TLS.KeyPath, + TrustedCAFile: cfg.TLS.CAPath, + ServerName: cfg.TLS.ServerName, + InsecureSkipVerify: cfg.TLS.InsecureSkipVerify, } return tlsInfo.ClientConfig() } @@ -110,7 +107,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou for i := 0; i < c.cfg.MaxRetries; i++ { resp, err := c.cli.Get(ctx, key) if err != nil { - level.Error(util.Logger).Log("msg", "error getting key", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error getting key", "key", key, "err", err) lastErr = err continue } @@ -119,7 +116,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou if len(resp.Kvs) > 0 { intermediate, err = c.codec.Decode(resp.Kvs[0].Value) if err != nil { - level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error decoding key", "key", key, "err", err) lastErr = err continue } @@ -143,7 +140,7 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou buf, err := c.codec.Encode(intermediate) if err != nil { - level.Error(util.Logger).Log("msg", "error serialising value", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error serialising value", "key", key, "err", err) lastErr = err continue } @@ -153,13 +150,13 @@ func (c *Client) CAS(ctx context.Context, key string, f func(in interface{}) (ou Then(clientv3.OpPut(key, string(buf))). Commit() if err != nil { - level.Error(util.Logger).Log("msg", "error CASing", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error CASing", "key", key, "err", err) lastErr = err continue } // result is not Succeeded if the the comparison was false, meaning if the modify indexes did not match. 
if !result.Succeeded { - level.Debug(util.Logger).Log("msg", "failed to CAS, revision and version did not match in etcd", "key", key, "revision", revision) + level.Debug(util_log.Logger).Log("msg", "failed to CAS, revision and version did not match in etcd", "key", key, "revision", revision) continue } @@ -187,7 +184,7 @@ outer: for backoff.Ongoing() { for resp := range c.cli.Watch(watchCtx, key) { if err := resp.Err(); err != nil { - level.Error(util.Logger).Log("msg", "watch error", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "watch error", "key", key, "err", err) continue outer } @@ -196,7 +193,7 @@ outer: for _, event := range resp.Events { out, err := c.codec.Decode(event.Kv.Value) if err != nil { - level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error decoding key", "key", key, "err", err) continue } @@ -223,7 +220,7 @@ outer: for backoff.Ongoing() { for resp := range c.cli.Watch(watchCtx, key, clientv3.WithPrefix()) { if err := resp.Err(); err != nil { - level.Error(util.Logger).Log("msg", "watch error", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "watch error", "key", key, "err", err) continue outer } @@ -232,7 +229,7 @@ outer: for _, event := range resp.Events { out, err := c.codec.Decode(event.Kv.Value) if err != nil { - level.Error(util.Logger).Log("msg", "error decoding key", "key", key, "err", err) + level.Error(util_log.Logger).Log("msg", "error decoding key", "key", key, "err", err) continue } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go index c4687721233..f739b67241c 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/broadcast.go @@ -6,7 +6,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/hashicorp/memberlist" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // ringBroadcast implements memberlist.Broadcast interface, which is used by memberlist.TransmitLimitedQueue. 
@@ -45,7 +45,7 @@ func (r ringBroadcast) Invalidates(old memberlist.Broadcast) bool { // otherwise, we may be invalidating some older messages, which however covered different // ingesters if r.version >= oldb.version { - level.Debug(util.Logger).Log("msg", "Invalidating forwarded broadcast", "key", r.key, "version", r.version, "oldVersion", oldb.version, "content", fmt.Sprintf("%v", r.content), "oldContent", fmt.Sprintf("%v", oldb.content)) + level.Debug(util_log.Logger).Log("msg", "Invalidating forwarded broadcast", "key", r.key, "version", r.version, "oldVersion", oldb.version, "content", fmt.Sprintf("%v", r.content), "oldContent", fmt.Sprintf("%v", oldb.content)) return true } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv_init_service.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv_init_service.go index de0acddb0d6..1fbed32c313 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv_init_service.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/kv_init_service.go @@ -2,10 +2,22 @@ package memberlist import ( "context" + "encoding/json" + "fmt" + "html/template" + "net/http" + "sort" + "strconv" + "strings" "sync" + "time" "github.com/go-kit/kit/log" + "github.com/hashicorp/memberlist" + "go.uber.org/atomic" + "github.com/cortexproject/cortex/pkg/ring/kv/codec" + "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -22,7 +34,7 @@ type KVInitService struct { init sync.Once // state - kv *KV + kv atomic.Value err error watcher *services.FailureWatcher } @@ -40,12 +52,23 @@ func NewKVInitService(cfg *KVConfig, logger log.Logger) *KVInitService { // This method will initialize Memberlist.KV on first call, and add it to service failure watcher. func (kvs *KVInitService) GetMemberlistKV() (*KV, error) { kvs.init.Do(func() { - kvs.kv = NewKV(*kvs.cfg, kvs.logger) - kvs.watcher.WatchService(kvs.kv) - kvs.err = kvs.kv.StartAsync(context.Background()) + kv := NewKV(*kvs.cfg, kvs.logger) + kvs.watcher.WatchService(kv) + kvs.err = kv.StartAsync(context.Background()) + + kvs.kv.Store(kv) }) - return kvs.kv, kvs.err + return kvs.getKV(), kvs.err +} + +// Returns KV if it was initialized, or nil. +func (kvs *KVInitService) getKV() *KV { + kv := kvs.kv.Load() + if kv == nil { + return nil + } + return kv.(*KV) } func (kvs *KVInitService) running(ctx context.Context) error { @@ -59,9 +82,318 @@ func (kvs *KVInitService) running(ctx context.Context) error { } func (kvs *KVInitService) stopping(_ error) error { - if kvs.kv == nil { + kv := kvs.getKV() + if kv == nil { return nil } - return services.StopAndAwaitTerminated(context.Background(), kvs.kv) + return services.StopAndAwaitTerminated(context.Background(), kv) +} + +func (kvs *KVInitService) ServeHTTP(w http.ResponseWriter, req *http.Request) { + kv := kvs.getKV() + if kv == nil { + util.WriteTextResponse(w, "This Cortex instance doesn't use memberlist.") + return + } + + const ( + downloadKeyParam = "downloadKey" + viewKeyParam = "viewKey" + viewMsgParam = "viewMsg" + deleteMessagesParam = "deleteMessages" + ) + + if err := req.ParseForm(); err == nil { + if req.Form[downloadKeyParam] != nil { + downloadKey(w, kv.storeCopy(), req.Form[downloadKeyParam][0]) // Use first value, ignore the rest. 
+ return + } + + if req.Form[viewKeyParam] != nil { + viewKey(w, kv, kv.storeCopy(), req.Form[viewKeyParam][0], getFormat(req)) + return + } + + if req.Form[viewMsgParam] != nil { + msgID, err := strconv.Atoi(req.Form[viewMsgParam][0]) + if err != nil { + http.Error(w, err.Error(), http.StatusBadRequest) + return + } + + sent, received := kv.getSentAndReceivedMessages() + + for _, m := range append(sent, received...) { + if m.ID == msgID { + viewMessage(w, kv, m, getFormat(req)) + return + } + } + + http.Error(w, "message not found", http.StatusNotFound) + return + } + + if len(req.Form[deleteMessagesParam]) > 0 && req.Form[deleteMessagesParam][0] == "true" { + kv.deleteSentReceivedMessages() + + // Redirect back. + w.Header().Set("Location", "?"+deleteMessagesParam+"=false") + w.WriteHeader(http.StatusFound) + return + } + } + + members := kv.memberlist.Members() + sort.Slice(members, func(i, j int) bool { + return members[i].Name < members[j].Name + }) + + sent, received := kv.getSentAndReceivedMessages() + + util.RenderHTTPResponse(w, pageData{ + Now: time.Now(), + Memberlist: kv.memberlist, + SortedMembers: members, + Store: kv.storeCopy(), + SentMessages: sent, + ReceivedMessages: received, + }, pageTemplate, req) +} + +func getFormat(req *http.Request) string { + const viewFormat = "format" + + format := "" + if len(req.Form[viewFormat]) > 0 { + format = req.Form[viewFormat][0] + } + return format +} + +func viewMessage(w http.ResponseWriter, kv *KV, msg message, format string) { + c := kv.GetCodec(msg.Pair.Codec) + if c == nil { + http.Error(w, "codec not found", http.StatusNotFound) + return + } + + formatValue(w, c, msg.Pair.Value, format) +} + +func viewKey(w http.ResponseWriter, kv *KV, store map[string]valueDesc, key string, format string) { + if store[key].value == nil { + http.Error(w, "value not found", http.StatusNotFound) + return + } + + c := kv.GetCodec(store[key].codecID) + if c == nil { + http.Error(w, "codec not found", http.StatusNotFound) + return + } + + formatValue(w, c, store[key].value, format) } + +func formatValue(w http.ResponseWriter, codec codec.Codec, value []byte, format string) { + val, err := codec.Decode(value) + if err != nil { + http.Error(w, fmt.Sprintf("failed to decode: %v", err), http.StatusInternalServerError) + return + } + + w.WriteHeader(200) + w.Header().Add("content-type", "text/plain") + + switch format { + case "json", "json-pretty": + enc := json.NewEncoder(w) + if format == "json-pretty" { + enc.SetIndent("", " ") + } + + err = enc.Encode(val) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + + default: + _, _ = fmt.Fprintf(w, "%#v", val) + } +} + +func downloadKey(w http.ResponseWriter, store map[string]valueDesc, key string) { + if store[key].value == nil { + http.Error(w, "value not found", http.StatusNotFound) + return + } + + val := store[key] + + w.Header().Add("content-type", "application/octet-stream") + // Set content-length so that client knows whether it has received full response or not. + w.Header().Add("content-length", strconv.Itoa(len(val.value))) + w.Header().Add("content-disposition", fmt.Sprintf("attachment; filename=%d-%s", val.version, key)) + w.WriteHeader(200) + + // Ignore errors, we cannot do anything about them. 
+ _, _ = w.Write(val.value) +} + +type pageData struct { + Now time.Time + Memberlist *memberlist.Memberlist + SortedMembers []*memberlist.Node + Store map[string]valueDesc + SentMessages []message + ReceivedMessages []message +} + +var pageTemplate = template.Must(template.New("webpage").Funcs(template.FuncMap{ + "StringsJoin": strings.Join, +}).Parse(pageContent)) + +const pageContent = ` + + + + + Cortex Memberlist Status + + +

+	<h1>Cortex Memberlist Status</h1>
+	<p>Current time: {{ .Now }}</p>
+
+	<h2>KV Store</h2>
+	<table width="100%" border="1">
+		<thead>
+			<tr>
+				<th>Key</th>
+				<th>Value Details</th>
+				<th>Actions</th>
+			</tr>
+		</thead>
+		<tbody>
+			{{ range $k, $v := .Store }}
+			<tr>
+				<td>{{ $k }}</td>
+				<td>{{ $v }}</td>
+				<td>
+					<a href="?viewKey={{ $k }}&format=json">json</a>
+					| <a href="?viewKey={{ $k }}&format=json-pretty">json-pretty</a>
+					| <a href="?viewKey={{ $k }}&format=struct">struct</a>
+					| <a href="?downloadKey={{ $k }}">download</a>
+				</td>
+			</tr>
+			{{ end }}
+		</tbody>
+	</table>
+
+	<p>Note that value "version" is node-specific. It starts with 0 (on restart), and increases on each received update. Size is in bytes.</p>
+
+	<h2>Memberlist Cluster Members</h2>
+	<table width="100%" border="1">
+		<thead>
+			<tr>
+				<th>Name</th>
+				<th>Address</th>
+				<th>State</th>
+			</tr>
+		</thead>
+		<tbody>
+			{{ range .SortedMembers }}
+			<tr>
+				<td>{{ .Name }}</td>
+				<td>{{ .Address }}</td>
+				<td>{{ .State }}</td>
+			</tr>
+			{{ end }}
+		</tbody>
+	</table>
+
+	<p>State: 0 = Alive, 1 = Suspect, 2 = Dead, 3 = Left</p>
+
+	<h2>Received Messages</h2>
+
+	<a href="?deleteMessages=true">Delete All Messages (received and sent)</a>
+
+	<table width="100%" border="1">
+		<thead>
+			<tr>
+				<th>ID</th>
+				<th>Time</th>
+				<th>Key</th>
+				<th>Value in the Message</th>
+				<th>Version After Update (0 = no change)</th>
+				<th>Changes</th>
+				<th>Actions</th>
+			</tr>
+		</thead>
+		<tbody>
+			{{ range .ReceivedMessages }}
+			<tr>
+				<td>{{ .ID }}</td>
+				<td>{{ .Time.Format "15:04:05.000" }}</td>
+				<td>{{ .Pair.Key }}</td>
+				<td>size: {{ .Pair.Value | len }}, codec: {{ .Pair.Codec }}</td>
+				<td>{{ .Version }}</td>
+				<td>{{ StringsJoin .Changes ", " }}</td>
+				<td>
+					<a href="?viewMsg={{ .ID }}&format=json">json</a>
+					| <a href="?viewMsg={{ .ID }}&format=json-pretty">json-pretty</a>
+					| <a href="?viewMsg={{ .ID }}&format=struct">struct</a>
+				</td>
+			</tr>
+			{{ end }}
+		</tbody>
+	</table>
+
+	<h2>Sent Messages</h2>
+
+	<a href="?deleteMessages=true">Delete All Messages (received and sent)</a>
+
+	<table width="100%" border="1">
+		<thead>
+			<tr>
+				<th>ID</th>
+				<th>Time</th>
+				<th>Key</th>
+				<th>Value</th>
+				<th>Version</th>
+				<th>Changes</th>
+				<th>Actions</th>
+			</tr>
+		</thead>
+		<tbody>
+			{{ range .SentMessages }}
+			<tr>
+				<td>{{ .ID }}</td>
+				<td>{{ .Time.Format "15:04:05.000" }}</td>
+				<td>{{ .Pair.Key }}</td>
+				<td>size: {{ .Pair.Value | len }}, codec: {{ .Pair.Codec }}</td>
+				<td>{{ .Version }}</td>
+				<td>{{ StringsJoin .Changes ", " }}</td>
+				<td>
+					<a href="?viewMsg={{ .ID }}&format=json">json</a>
+					| <a href="?viewMsg={{ .ID }}&format=json-pretty">json-pretty</a>
+					| <a href="?viewMsg={{ .ID }}&format=struct">struct</a>
+				</td>
+			</tr>
+			{{ end }}
+		</tbody>
+	</table>
+ +` diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go index f61c7899dd7..056cc781718 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_client.go @@ -23,6 +23,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv/codec" "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -150,6 +151,9 @@ type KVConfig struct { // Timeout used when leaving the memberlist cluster. LeaveTimeout time.Duration `yaml:"leave_timeout"` + // How much space to use to keep received and sent messages in memory (for troubleshooting). + MessageHistoryBufferBytes int `yaml:"message_history_buffer_bytes"` + TCPTransport TCPTransportConfig `yaml:",inline"` // Where to put custom metrics. Metrics are not registered, if this is nil. @@ -180,6 +184,7 @@ func (cfg *KVConfig) RegisterFlags(f *flag.FlagSet, prefix string) { f.DurationVar(&cfg.PushPullInterval, prefix+"memberlist.pullpush-interval", 0, "How often to use pull/push sync. Uses memberlist LAN defaults if 0.") f.DurationVar(&cfg.GossipToTheDeadTime, prefix+"memberlist.gossip-to-dead-nodes-time", 0, "How long to keep gossiping to dead nodes, to give them chance to refute their death. Uses memberlist LAN defaults if 0.") f.DurationVar(&cfg.DeadNodeReclaimTime, prefix+"memberlist.dead-node-reclaim-time", 0, "How soon can dead node's name be reclaimed with new address. Defaults to 0, which is disabled.") + f.IntVar(&cfg.MessageHistoryBufferBytes, prefix+"memberlist.message-history-buffer-bytes", 0, "How much space to use for keeping received and sent messages in memory for troubleshooting (two buffers). 0 to disable.") cfg.TCPTransport.RegisterFlags(f, prefix) } @@ -188,7 +193,7 @@ func generateRandomSuffix() string { suffix := make([]byte, 4) _, err := rand.Read(suffix) if err != nil { - level.Error(util.Logger).Log("msg", "failed to generate random suffix", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to generate random suffix", "err", err) return "error" } return fmt.Sprintf("%2x", suffix) @@ -225,6 +230,15 @@ type KV struct { watchers map[string][]chan string prefixWatchers map[string][]chan string + // Buffers with sent and received messages. Used for troubleshooting only. + // New messages are appended, old messages (based on configured size limit) removed from the front. + messagesMu sync.Mutex + sentMessages []message + sentMessagesSize int + receivedMessages []message + receivedMessagesSize int + messageCounter int // Used to give each message in the sentMessages and receivedMessages a unique ID, for UI. + // closed on shutdown shutdown chan struct{} @@ -255,6 +269,19 @@ type KV struct { maxCasRetries int } +// Message describes incoming or outgoing message, and local state after applying incoming message, or state when sending message. +// Fields are exported for templating to work. +type message struct { + ID int // Unique local ID of the message. + Time time.Time // Time when message was sent or received. + Size int // Message size + Pair KeyValuePair + + // Following values are computed on the receiving node, based on local state. + Version uint // For sent message, which version the message reflects. 
For received message, version after applying the message. + Changes []string // List of changes in this message (as computed by *this* node). +} + type valueDesc struct { // We store bytes here. Reason is that clients calling CAS function will modify the object in place, // but unless CAS succeeds, we don't want those modifications to be visible. @@ -267,6 +294,10 @@ type valueDesc struct { codecID string } +func (v valueDesc) String() string { + return fmt.Sprintf("size: %d, version: %d, codec: %s", len(v.value), v.version, v.codecID) +} + var ( // if merge fails because of CAS version mismatch, this error is returned. CAS operation reacts on it errVersionMismatch = errors.New("version mismatch") @@ -873,6 +904,14 @@ func (m *KV) broadcastNewValue(key string, change Mergeable, version uint, codec return } + m.addSentMessage(message{ + Time: time.Now(), + Size: len(pairData), + Pair: kvPair, + Version: version, + Changes: change.MergeContent(), + }) + m.queueBroadcast(key, change.MergeContent(), version, pairData) } @@ -914,11 +953,33 @@ func (m *KV) NotifyMsg(msg []byte) { // we have a ring update! Let's merge it with our version of the ring for given key mod, version, err := m.mergeBytesValueForKey(kvPair.Key, kvPair.Value, codec) + + changes := []string(nil) + if mod != nil { + changes = mod.MergeContent() + } + + m.addReceivedMessage(message{ + Time: time.Now(), + Size: len(msg), + Pair: kvPair, + Version: version, + Changes: changes, + }) + if err != nil { level.Error(m.logger).Log("msg", "failed to store received value", "key", kvPair.Key, "err", err) } else if version > 0 { m.notifyWatchers(kvPair.Key) + m.addSentMessage(message{ + Time: time.Now(), + Size: len(msg), + Pair: kvPair, + Version: version, + Changes: changes, + }) + // Forward this message // Memberlist will modify message once this function returns, so we need to make a copy msgCopy := append([]byte(nil), msg...) @@ -970,6 +1031,7 @@ func (m *KV) LocalState(join bool) []byte { // [4-bytes length of marshalled KV pair] [marshalled KV pair] buf := bytes.Buffer{} + sent := time.Now() kvPair := KeyValuePair{} for key, val := range m.store { @@ -999,6 +1061,13 @@ func (m *KV) LocalState(join bool) []byte { continue } buf.Write(ser) + + m.addSentMessage(message{ + Time: sent, + Size: len(ser), + Pair: kvPair, // Makes a copy of kvPair. + Version: val.version, + }) } m.totalSizeOfPulls.Add(float64(buf.Len())) @@ -1009,8 +1078,10 @@ func (m *KV) LocalState(join bool) []byte { // // This is 'push' part of push/pull sync. We merge incoming KV store (all keys and values) with ours. // -// Data is full state of remote KV store, as generated by `LocalState` method (run on another node). +// Data is full state of remote KV store, as generated by LocalState method (run on another node). func (m *KV) MergeRemoteState(data []byte, join bool) { + received := time.Now() + m.initWG.Wait() m.numberOfPushes.Inc() @@ -1053,6 +1124,20 @@ func (m *KV) MergeRemoteState(data []byte, join bool) { // we have both key and value, try to merge it with our state change, newver, err := m.mergeBytesValueForKey(kvPair.Key, kvPair.Value, codec) + + changes := []string(nil) + if change != nil { + changes = change.MergeContent() + } + + m.addReceivedMessage(message{ + Time: received, + Size: int(kvPairLength), + Pair: kvPair, // Makes a copy of kvPair. 
+ Version: newver, + Changes: changes, + }) + if err != nil { level.Error(m.logger).Log("msg", "failed to store received value", "key", kvPair.Key, "err", err) } else if newver > 0 { @@ -1151,3 +1236,71 @@ func computeNewValue(incoming Mergeable, stored []byte, c codec.Codec, cas bool) change, err := oldVal.Merge(incoming, cas) return oldVal, change, err } + +func (m *KV) storeCopy() map[string]valueDesc { + m.storeMu.Lock() + defer m.storeMu.Unlock() + + result := make(map[string]valueDesc, len(m.store)) + for k, v := range m.store { + result[k] = v + } + return result +} +func (m *KV) addReceivedMessage(msg message) { + if m.cfg.MessageHistoryBufferBytes == 0 { + return + } + + m.messagesMu.Lock() + defer m.messagesMu.Unlock() + + m.messageCounter++ + msg.ID = m.messageCounter + + m.receivedMessages, m.receivedMessagesSize = addMessageToBuffer(m.receivedMessages, m.receivedMessagesSize, m.cfg.MessageHistoryBufferBytes, msg) +} + +func (m *KV) addSentMessage(msg message) { + if m.cfg.MessageHistoryBufferBytes == 0 { + return + } + + m.messagesMu.Lock() + defer m.messagesMu.Unlock() + + m.messageCounter++ + msg.ID = m.messageCounter + + m.sentMessages, m.sentMessagesSize = addMessageToBuffer(m.sentMessages, m.sentMessagesSize, m.cfg.MessageHistoryBufferBytes, msg) +} + +func (m *KV) getSentAndReceivedMessages() (sent, received []message) { + m.messagesMu.Lock() + defer m.messagesMu.Unlock() + + // Make copy of both slices. + return append([]message(nil), m.sentMessages...), append([]message(nil), m.receivedMessages...) +} + +func (m *KV) deleteSentReceivedMessages() { + m.messagesMu.Lock() + defer m.messagesMu.Unlock() + + m.sentMessages = nil + m.sentMessagesSize = 0 + m.receivedMessages = nil + m.receivedMessagesSize = 0 +} + +func addMessageToBuffer(msgs []message, size int, limit int, msg message) ([]message, int) { + msgs = append(msgs, msg) + size += msg.Size + + for len(msgs) > 0 && size > limit { + size -= msgs[0].Size + msgs = msgs[1:] + } + + return msgs, size +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go index f37d6fdc665..6b35a1e6904 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/memberlist_logger.go @@ -11,7 +11,7 @@ import ( // loggerAdapter wraps a Logger and allows it to be passed to the stdlib // logger's SetOutput. It understand and parses output produced by memberlist -// library (esp. level). Timestamp from memberlist can be ignored (eg. util.Logger +// library (esp. level). Timestamp from memberlist can be ignored (eg. 
pkg/util/log.Logger // is set up to auto-include timestamp with every message already) type loggerAdapter struct { log.Logger diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go index e0fcf7c9964..2cbcb6b15a9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/memberlist/metrics.go @@ -8,7 +8,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -191,7 +191,7 @@ func (m *KV) createAndRegisterMetrics() { } if err != nil { - level.Error(util.Logger).Log("msg", "failed to register prometheus metrics for memberlist", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to register prometheus metrics for memberlist", "err", err) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go index 6fa84503648..1dbd6e2edb4 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/metrics.go @@ -20,14 +20,22 @@ func RegistererWithKVName(reg prometheus.Registerer, name string) prometheus.Reg return prometheus.WrapRegistererWith(prometheus.Labels{"kv_name": name}, reg) } -// errorCode converts an error into an HTTP status code, modified from weaveworks/common/instrument -func errorCode(err error) string { +// getCasErrorCode converts the provided CAS error into the code that should be used to track the operation +// in metrics. +func getCasErrorCode(err error) string { if err == nil { return "200" } if resp, ok := httpgrpc.HTTPResponseFromError(err); ok { return strconv.Itoa(int(resp.GetCode())) } + + // If the error has been returned to abort the CAS operation, then we shouldn't + // consider it an error when tracking metrics. + if casErr, ok := err.(interface{ IsOperationAborted() bool }); ok && casErr.IsOperationAborted() { + return "200" + } + return "500" } @@ -81,7 +89,7 @@ func (m metrics) Delete(ctx context.Context, key string) error { } func (m metrics) CAS(ctx context.Context, key string, f func(in interface{}) (out interface{}, retry bool, err error)) error { - return instrument.CollectedRequest(ctx, "CAS", m.requestDuration, errorCode, func(ctx context.Context) error { + return instrument.CollectedRequest(ctx, "CAS", m.requestDuration, getCasErrorCode, func(ctx context.Context) error { return m.c.CAS(ctx, key, f) }) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go index c899b634326..ac7ae011df7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/mock.go @@ -5,7 +5,7 @@ import ( "github.com/go-kit/kit/log/level" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // The mockClient does not anything. 
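
The `getCasErrorCode` change earlier in this hunk lets a CAS callback abort an operation on purpose, via an error exposing `IsOperationAborted() bool`, without metrics recording it as a server-side failure. A sketch of such a sentinel error plus the same interface check (the `httpgrpc` branch of the real function is omitted here):

```go
package kvutil

// casAbortedError marks a deliberate CAS abort so that metrics track the
// operation as a "200" rather than a failure.
type casAbortedError struct{ reason string }

func (e casAbortedError) Error() string            { return "CAS operation aborted: " + e.reason }
func (e casAbortedError) IsOperationAborted() bool { return true }

// errorCodeForCAS mirrors the getCasErrorCode logic above: a nil error or a
// deliberate abort counts as success; anything else is tracked as "500".
func errorCodeForCAS(err error) string {
	if err == nil {
		return "200"
	}
	if casErr, ok := err.(interface{ IsOperationAborted() bool }); ok && casErr.IsOperationAborted() {
		return "200"
	}
	return "500"
}
```
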
@@ -13,7 +13,7 @@ import ( type mockClient struct{} func buildMockClient() (Client, error) { - level.Warn(util.Logger).Log("msg", "created mockClient for testing only") + level.Warn(util_log.Logger).Log("msg", "created mockClient for testing only") return mockClient{}, nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go index 3817725fe3a..3bfb1bcdbba 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/kv/multi.go @@ -11,7 +11,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/go-kit/kit/log/level" ) @@ -118,7 +118,7 @@ func NewMultiClient(cfg MultiConfig, clients []kvclient) *MultiClient { mirrorTimeout: cfg.MirrorTimeout, mirroringEnabled: atomic.NewBool(cfg.MirrorEnabled), - logger: log.With(util.Logger, "component", "multikv"), + logger: log.With(util_log.Logger, "component", "multikv"), } ctx, cancelFn := context.WithCancel(context.Background()) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go index cc03bf0fbaa..4e82d645209 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/lifecycler.go @@ -17,8 +17,8 @@ import ( "go.uber.org/atomic" "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -92,7 +92,7 @@ func (cfg *LifecyclerConfig) RegisterFlagsWithPrefix(prefix string, f *flag.Flag hostname, err := os.Hostname() if err != nil { - level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err) + level.Error(log.Logger).Log("msg", "failed to get hostname", "err", err) os.Exit(1) } @@ -164,7 +164,7 @@ func NewLifecycler(cfg LifecyclerConfig, flushTransferer FlushTransferer, ringNa zone := cfg.Zone if zone != "" { - util.WarnExperimentalUse("Zone aware replication") + log.WarnExperimentalUse("Zone aware replication") } // We do allow a nil FlushTransferer, but to keep the ring logic easier we assume @@ -218,7 +218,7 @@ func (i *Lifecycler) CheckReady(ctx context.Context) error { desc, err := i.KVStore.Get(ctx, i.RingKey) if err != nil { - level.Error(util.Logger).Log("msg", "error talking to the KV store", "ring", i.RingName, "err", err) + level.Error(log.Logger).Log("msg", "error talking to the KV store", "ring", i.RingName, "err", err) return fmt.Errorf("error talking to the KV store: %s", err) } @@ -232,7 +232,7 @@ func (i *Lifecycler) CheckReady(ctx context.Context) error { } if err := ringDesc.Ready(time.Now(), i.cfg.RingConfig.HeartbeatTimeout); err != nil { - level.Warn(util.Logger).Log("msg", "found an existing instance(s) with a problem in the ring, "+ + level.Warn(log.Logger).Log("msg", "found an existing instance(s) with a problem in the ring, "+ "this instance cannot become ready until this problem is resolved. 
"+ "The /ring http endpoint on the distributor (or single binary) provides visibility into the ring.", "ring", i.RingName, "err", err) @@ -298,7 +298,7 @@ func (i *Lifecycler) setTokens(tokens Tokens) { i.tokens = tokens if i.cfg.TokensFilePath != "" { if err := i.tokens.StoreToFile(i.cfg.TokensFilePath); err != nil { - level.Error(util.Logger).Log("msg", "error storing tokens to disk", "path", i.cfg.TokensFilePath, "err", err) + level.Error(log.Logger).Log("msg", "error storing tokens to disk", "path", i.cfg.TokensFilePath, "err", err) } } } @@ -342,7 +342,7 @@ func (i *Lifecycler) ClaimTokensFor(ctx context.Context, ingesterID string) erro } if err := i.KVStore.CAS(ctx, i.RingKey, claimTokens); err != nil { - level.Error(util.Logger).Log("msg", "Failed to write to the KV store", "ring", i.RingName, "err", err) + level.Error(log.Logger).Log("msg", "Failed to write to the KV store", "ring", i.RingName, "err", err) } i.setTokens(tokens) @@ -390,11 +390,11 @@ func (i *Lifecycler) loop(ctx context.Context) error { for { select { case <-autoJoinAfter: - level.Debug(util.Logger).Log("msg", "JoinAfter expired", "ring", i.RingName) + level.Debug(log.Logger).Log("msg", "JoinAfter expired", "ring", i.RingName) // Will only fire once, after auto join timeout. If we haven't entered "JOINING" state, // then pick some tokens and enter ACTIVE state. if i.GetState() == PENDING { - level.Info(util.Logger).Log("msg", "auto-joining cluster after timeout", "ring", i.RingName) + level.Info(log.Logger).Log("msg", "auto-joining cluster after timeout", "ring", i.RingName) if i.cfg.ObservePeriod > 0 { // let's observe the ring. By using JOINING state, this ingester will be ignored by LEAVING @@ -403,7 +403,7 @@ func (i *Lifecycler) loop(ctx context.Context) error { return perrors.Wrapf(err, "failed to pick tokens in the KV store, ring: %s", i.RingName) } - level.Info(util.Logger).Log("msg", "observing tokens before going ACTIVE", "ring", i.RingName) + level.Info(log.Logger).Log("msg", "observing tokens before going ACTIVE", "ring", i.RingName) observeChan = time.After(i.cfg.ObservePeriod) } else { if err := i.autoJoin(context.Background(), ACTIVE); err != nil { @@ -418,18 +418,18 @@ func (i *Lifecycler) loop(ctx context.Context) error { observeChan = nil if s := i.GetState(); s != JOINING { - level.Error(util.Logger).Log("msg", "unexpected state while observing tokens", "state", s, "ring", i.RingName) + level.Error(log.Logger).Log("msg", "unexpected state while observing tokens", "state", s, "ring", i.RingName) } if i.verifyTokens(context.Background()) { - level.Info(util.Logger).Log("msg", "token verification successful", "ring", i.RingName) + level.Info(log.Logger).Log("msg", "token verification successful", "ring", i.RingName) err := i.changeState(context.Background(), ACTIVE) if err != nil { - level.Error(util.Logger).Log("msg", "failed to set state to ACTIVE", "ring", i.RingName, "err", err) + level.Error(log.Logger).Log("msg", "failed to set state to ACTIVE", "ring", i.RingName, "err", err) } } else { - level.Info(util.Logger).Log("msg", "token verification failed, observing", "ring", i.RingName) + level.Info(log.Logger).Log("msg", "token verification failed, observing", "ring", i.RingName) // keep observing observeChan = time.After(i.cfg.ObservePeriod) } @@ -437,14 +437,14 @@ func (i *Lifecycler) loop(ctx context.Context) error { case <-heartbeatTicker.C: consulHeartbeats.WithLabelValues(i.RingName).Inc() if err := i.updateConsul(context.Background()); err != nil { - level.Error(util.Logger).Log("msg", 
"failed to write to the KV store, sleeping", "ring", i.RingName, "err", err) + level.Error(log.Logger).Log("msg", "failed to write to the KV store, sleeping", "ring", i.RingName, "err", err) } case f := <-i.actorChan: f() case <-ctx.Done(): - level.Info(util.Logger).Log("msg", "lifecycler loop() exited gracefully", "ring", i.RingName) + level.Info(log.Logger).Log("msg", "lifecycler loop() exited gracefully", "ring", i.RingName) return nil } } @@ -467,7 +467,7 @@ func (i *Lifecycler) stopping(runningError error) error { // Mark ourselved as Leaving so no more samples are send to us. err := i.changeState(context.Background(), LEAVING) if err != nil { - level.Error(util.Logger).Log("msg", "failed to set state to LEAVING", "ring", i.RingName, "err", err) + level.Error(log.Logger).Log("msg", "failed to set state to LEAVING", "ring", i.RingName, "err", err) } // Do the transferring / flushing on a background goroutine so we can continue @@ -484,7 +484,7 @@ heartbeatLoop: case <-heartbeatTicker.C: consulHeartbeats.WithLabelValues(i.RingName).Inc() if err := i.updateConsul(context.Background()); err != nil { - level.Error(util.Logger).Log("msg", "failed to write to the KV store, sleeping", "ring", i.RingName, "err", err) + level.Error(log.Logger).Log("msg", "failed to write to the KV store, sleeping", "ring", i.RingName, "err", err) } case <-done: @@ -496,7 +496,7 @@ heartbeatLoop: if err := i.unregister(context.Background()); err != nil { return perrors.Wrapf(err, "failed to unregister from the KV store, ring: %s", i.RingName) } - level.Info(util.Logger).Log("msg", "instance removed from the KV store", "ring", i.RingName) + level.Info(log.Logger).Log("msg", "instance removed from the KV store", "ring", i.RingName) } return nil @@ -514,11 +514,11 @@ func (i *Lifecycler) initRing(ctx context.Context) error { if i.cfg.TokensFilePath != "" { tokensFromFile, err = LoadTokensFromFile(i.cfg.TokensFilePath) - if err != nil { - level.Error(util.Logger).Log("msg", "error in getting tokens from file", "err", err) + if err != nil && !os.IsNotExist(err) { + level.Error(log.Logger).Log("msg", "error loading tokens from file", "err", err) } } else { - level.Info(util.Logger).Log("msg", "not loading tokens from file, tokens file path is empty") + level.Info(log.Logger).Log("msg", "not loading tokens from file, tokens file path is empty") } err = i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) { @@ -528,7 +528,7 @@ func (i *Lifecycler) initRing(ctx context.Context) error { ringDesc = in.(*Desc) } - ingesterDesc, ok := ringDesc.Ingesters[i.ID] + instanceDesc, ok := ringDesc.Ingesters[i.ID] if !ok { // The instance doesn't exist in the ring, so it's safe to set the registered timestamp // as of now. @@ -537,7 +537,7 @@ func (i *Lifecycler) initRing(ctx context.Context) error { // We use the tokens from the file only if it does not exist in the ring yet. 
if len(tokensFromFile) > 0 { - level.Info(util.Logger).Log("msg", "adding tokens from file", "num_tokens", len(tokensFromFile)) + level.Info(log.Logger).Log("msg", "adding tokens from file", "num_tokens", len(tokensFromFile)) if len(tokensFromFile) >= i.cfg.NumTokens { i.setState(ACTIVE) } @@ -547,38 +547,38 @@ func (i *Lifecycler) initRing(ctx context.Context) error { } // Either we are a new ingester, or consul must have restarted - level.Info(util.Logger).Log("msg", "instance not found in ring, adding with no tokens", "ring", i.RingName) + level.Info(log.Logger).Log("msg", "instance not found in ring, adding with no tokens", "ring", i.RingName) ringDesc.AddIngester(i.ID, i.Addr, i.Zone, []uint32{}, i.GetState(), registeredAt) return ringDesc, true, nil } // The instance already exists in the ring, so we can't change the registered timestamp (even if it's zero) // but we need to update the local state accordingly. - i.setRegisteredAt(ingesterDesc.GetRegisteredAt()) + i.setRegisteredAt(instanceDesc.GetRegisteredAt()) // If the ingester is in the JOINING state this means it crashed due to // a failed token transfer or some other reason during startup. We want // to set it back to PENDING in order to start the lifecycle from the // beginning. - if ingesterDesc.State == JOINING { - level.Warn(util.Logger).Log("msg", "instance found in ring as JOINING, setting to PENDING", + if instanceDesc.State == JOINING { + level.Warn(log.Logger).Log("msg", "instance found in ring as JOINING, setting to PENDING", "ring", i.RingName) - ingesterDesc.State = PENDING + instanceDesc.State = PENDING return ringDesc, true, nil } // If the ingester failed to clean up its ring entry, it can leave its state in LEAVING. // Move it into ACTIVE to ensure the ingester joins the ring. - if ingesterDesc.State == LEAVING && len(ingesterDesc.Tokens) == i.cfg.NumTokens { - ingesterDesc.State = ACTIVE + if instanceDesc.State == LEAVING && len(instanceDesc.Tokens) == i.cfg.NumTokens { + instanceDesc.State = ACTIVE } // We exist in the ring, so assume the ring is right and copy out tokens & state out of there. - i.setState(ingesterDesc.State) + i.setState(instanceDesc.State) tokens, _ := ringDesc.TokensFor(i.ID) i.setTokens(tokens) - level.Info(util.Logger).Log("msg", "existing entry found in ring", "state", i.GetState(), "tokens", len(tokens), "ring", i.RingName) + level.Info(log.Logger).Log("msg", "existing entry found in ring", "state", i.GetState(), "tokens", len(tokens), "ring", i.RingName) // we haven't modified the ring, don't try to store it. return nil, true, nil }) @@ -612,7 +612,7 @@ func (i *Lifecycler) verifyTokens(ctx context.Context) bool { // uh, oh... our tokens are not ours anymore. Let's try new ones. needTokens := i.cfg.NumTokens - len(ringTokens) - level.Info(util.Logger).Log("msg", "generating new tokens", "count", needTokens, "ring", i.RingName) + level.Info(log.Logger).Log("msg", "generating new tokens", "count", needTokens, "ring", i.RingName) newTokens := GenerateTokens(needTokens, takenTokens) ringTokens = append(ringTokens, newTokens...) 
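The initRing hunk above changes tokens-file loading so that a missing file, which is the expected state on an instance's first start, is no longer logged as an error. A minimal standalone sketch of that behaviour; loadTokens is a hypothetical stand-in for the vendored LoadTokensFromFile, not its actual implementation:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// loadTokens tolerates a missing file: it yields no tokens and no error.
// Any other failure is reported to the caller.
func loadTokens(path string) ([]uint32, error) {
	data, err := os.ReadFile(path)
	if os.IsNotExist(err) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	var tokens []uint32
	if err := json.Unmarshal(data, &tokens); err != nil {
		return nil, err
	}
	return tokens, nil
}

func main() {
	tokens, err := loadTokens("/tmp/no-such-tokens-file")
	fmt.Println(tokens, err) // [] <nil>: first start, nothing worth logging
}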
@@ -631,7 +631,7 @@ func (i *Lifecycler) verifyTokens(ctx context.Context) bool { }) if err != nil { - level.Error(util.Logger).Log("msg", "failed to verify tokens", "ring", i.RingName, "err", err) + level.Error(log.Logger).Log("msg", "failed to verify tokens", "ring", i.RingName, "err", err) return false } @@ -670,7 +670,7 @@ func (i *Lifecycler) autoJoin(ctx context.Context, targetState IngesterState) er // At this point, we should not have any tokens, and we should be in PENDING state. myTokens, takenTokens := ringDesc.TokensFor(i.ID) if len(myTokens) > 0 { - level.Error(util.Logger).Log("msg", "tokens already exist for this instance - wasn't expecting any!", "num_tokens", len(myTokens), "ring", i.RingName) + level.Error(log.Logger).Log("msg", "tokens already exist for this instance - wasn't expecting any!", "num_tokens", len(myTokens), "ring", i.RingName) } newTokens := GenerateTokens(i.cfg.NumTokens-len(myTokens), takenTokens) @@ -705,18 +705,18 @@ func (i *Lifecycler) updateConsul(ctx context.Context) error { ringDesc = in.(*Desc) } - ingesterDesc, ok := ringDesc.Ingesters[i.ID] + instanceDesc, ok := ringDesc.Ingesters[i.ID] if !ok { // consul must have restarted - level.Info(util.Logger).Log("msg", "found empty ring, inserting tokens", "ring", i.RingName) + level.Info(log.Logger).Log("msg", "found empty ring, inserting tokens", "ring", i.RingName) ringDesc.AddIngester(i.ID, i.Addr, i.Zone, i.getTokens(), i.GetState(), i.getRegisteredAt()) } else { - ingesterDesc.Timestamp = time.Now().Unix() - ingesterDesc.State = i.GetState() - ingesterDesc.Addr = i.Addr - ingesterDesc.Zone = i.Zone - ingesterDesc.RegisteredTimestamp = i.getRegisteredAt().Unix() - ringDesc.Ingesters[i.ID] = ingesterDesc + instanceDesc.Timestamp = time.Now().Unix() + instanceDesc.State = i.GetState() + instanceDesc.Addr = i.Addr + instanceDesc.Zone = i.Zone + instanceDesc.RegisteredTimestamp = i.getRegisteredAt().Unix() + ringDesc.Ingesters[i.ID] = instanceDesc } return ringDesc, true, nil @@ -743,7 +743,7 @@ func (i *Lifecycler) changeState(ctx context.Context, state IngesterState) error return fmt.Errorf("Changing instance state from %v -> %v is disallowed", currState, state) } - level.Info(util.Logger).Log("msg", "changing instance state from", "old_state", currState, "new_state", state, "ring", i.RingName) + level.Info(log.Logger).Log("msg", "changing instance state from", "old_state", currState, "new_state", state, "ring", i.RingName) i.setState(state) return i.updateConsul(ctx) } @@ -753,11 +753,13 @@ func (i *Lifecycler) updateCounters(ringDesc *Desc) { zones := map[string]struct{}{} if ringDesc != nil { + now := time.Now() + for _, ingester := range ringDesc.Ingesters { zones[ingester.Zone] = struct{}{} // Count the number of healthy instances for Write operation. 
- if ingester.IsHealthy(Write, i.cfg.RingConfig.HeartbeatTimeout) { + if ingester.IsHealthy(Write, i.cfg.RingConfig.HeartbeatTimeout, now) { healthyInstancesCount++ } } @@ -796,9 +798,9 @@ func (i *Lifecycler) processShutdown(ctx context.Context) { transferStart := time.Now() if err := i.flushTransferer.TransferOut(ctx); err != nil { if err == ErrTransferDisabled { - level.Info(util.Logger).Log("msg", "transfers are disabled") + level.Info(log.Logger).Log("msg", "transfers are disabled") } else { - level.Error(util.Logger).Log("msg", "failed to transfer chunks to another instance", "ring", i.RingName, "err", err) + level.Error(log.Logger).Log("msg", "failed to transfer chunks to another instance", "ring", i.RingName, "err", err) shutdownDuration.WithLabelValues("transfer", "fail", i.RingName).Observe(time.Since(transferStart).Seconds()) } } else { @@ -818,7 +820,7 @@ func (i *Lifecycler) processShutdown(ctx context.Context) { // unregister removes our entry from consul. func (i *Lifecycler) unregister(ctx context.Context) error { - level.Debug(util.Logger).Log("msg", "unregistering instance from ring", "ring", i.RingName) + level.Debug(log.Logger).Log("msg", "unregistering instance from ring", "ring", i.RingName) return i.KVStore.CAS(ctx, i.RingKey, func(in interface{}) (out interface{}, retry bool, err error) { if in == nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go index d9ebd78155c..4187275184f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/model.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/model.go @@ -1,6 +1,7 @@ package ring import ( + "container/heap" "fmt" "sort" "time" @@ -11,15 +12,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv/memberlist" ) -// ByToken is a sortable list of TokenDescs -type ByToken []TokenDesc - -func (ts ByToken) Len() int { return len(ts) } -func (ts ByToken) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } -func (ts ByToken) Less(i, j int) bool { return ts[i].Token < ts[j].Token } - -// ByAddr is a sortable list of IngesterDesc. -type ByAddr []IngesterDesc +// ByAddr is a sortable list of InstanceDesc. +type ByAddr []InstanceDesc func (ts ByAddr) Len() int { return len(ts) } func (ts ByAddr) Swap(i, j int) { ts[i], ts[j] = ts[j], ts[i] } @@ -38,15 +32,15 @@ func GetCodec() codec.Codec { // NewDesc returns an empty ring.Desc func NewDesc() *Desc { return &Desc{ - Ingesters: map[string]IngesterDesc{}, + Ingesters: map[string]InstanceDesc{}, } } // AddIngester adds the given ingester to the ring. Ingester will only use supplied tokens, // any other tokens are removed. 
-func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state IngesterState, registeredAt time.Time) IngesterDesc { +func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state IngesterState, registeredAt time.Time) InstanceDesc { if d.Ingesters == nil { - d.Ingesters = map[string]IngesterDesc{} + d.Ingesters = map[string]InstanceDesc{} } registeredTimestamp := int64(0) @@ -54,7 +48,7 @@ func (d *Desc) AddIngester(id, addr, zone string, tokens []uint32, state Ingeste registeredTimestamp = registeredAt.Unix() } - ingester := IngesterDesc{ + ingester := InstanceDesc{ Addr: addr, Timestamp: time.Now().Unix(), RegisteredTimestamp: registeredTimestamp, @@ -93,8 +87,8 @@ func (d *Desc) ClaimTokens(from, to string) Tokens { } // FindIngestersByState returns the list of ingesters in the given state -func (d *Desc) FindIngestersByState(state IngesterState) []IngesterDesc { - var result []IngesterDesc +func (d *Desc) FindIngestersByState(state IngesterState) []InstanceDesc { + var result []InstanceDesc for _, ing := range d.Ingesters { if ing.State == state { result = append(result, ing) @@ -121,21 +115,17 @@ func (d *Desc) Ready(now time.Time, heartbeatTimeout time.Duration) error { return nil } -// TokensFor partitions the tokens into those for the given ID, and those for others. -func (d *Desc) TokensFor(id string) (tokens, other Tokens) { - takenTokens, myTokens := Tokens{}, Tokens{} - for _, token := range d.getTokens() { - takenTokens = append(takenTokens, token.Token) - if token.Ingester == id { - myTokens = append(myTokens, token.Token) - } - } - return myTokens, takenTokens +// TokensFor returns all ring tokens and the tokens for the provided input ID. +// Returned tokens are guaranteed to be sorted. +func (d *Desc) TokensFor(id string) (myTokens, allTokens Tokens) { + allTokens = d.GetTokens() + myTokens = d.Ingesters[id].Tokens + return } // GetRegisteredAt returns the timestamp when the instance has been registered to the ring // or a zero value if unknown. -func (i *IngesterDesc) GetRegisteredAt() time.Time { +func (i *InstanceDesc) GetRegisteredAt() time.Time { if i == nil || i.RegisteredTimestamp == 0 { return time.Time{} } @@ -143,34 +133,10 @@ func (i *IngesterDesc) GetRegisteredAt() time.Time { return time.Unix(i.RegisteredTimestamp, 0) } -// IsHealthy checks whether the ingester appears to be alive and heartbeating -func (i *IngesterDesc) IsHealthy(op Operation, heartbeatTimeout time.Duration) bool { - healthy := false - - switch op { - case Write: - healthy = i.State == ACTIVE - - case Read: - healthy = (i.State == ACTIVE) || (i.State == LEAVING) || (i.State == PENDING) - - case Reporting: - healthy = true - - case BlocksSync: - healthy = (i.State == JOINING) || (i.State == ACTIVE) || (i.State == LEAVING) +func (i *InstanceDesc) IsHealthy(op Operation, heartbeatTimeout time.Duration, now time.Time) bool { + healthy := op.IsInstanceInStateHealthy(i.State) - case BlocksRead: - healthy = i.State == ACTIVE - - case Ruler: - healthy = i.State == ACTIVE - - case Compactor: - healthy = i.State == ACTIVE - } - - return healthy && time.Since(time.Unix(i.Timestamp, 0)) <= heartbeatTimeout + return healthy && now.Unix()-i.Timestamp <= heartbeatTimeout.Milliseconds()/1000 } // Merge merges other ring into this one. Returns sub-ring that represents the change, @@ -189,6 +155,10 @@ func (i *IngesterDesc) IsHealthy(op Operation, heartbeatTimeout time.Duration) b // // This method is part of memberlist.Mergeable interface, and is only used by gossiping ring. 
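The rewritten IsHealthy above takes the current time from the caller, so hot paths like Filter can compute time.Now() once per request rather than once per instance. The heartbeat half of the check reduces to seconds-level arithmetic; a minimal standalone sketch (not the vendored code):

package main

import (
	"fmt"
	"time"
)

// heartbeatFresh mirrors the timestamp comparison in the new IsHealthy: the
// last heartbeat (Unix seconds) must be within the timeout of the supplied now.
func heartbeatFresh(lastHeartbeatUnix int64, timeout time.Duration, now time.Time) bool {
	return now.Unix()-lastHeartbeatUnix <= timeout.Milliseconds()/1000
}

func main() {
	now := time.Now()
	fmt.Println(heartbeatFresh(now.Add(-30*time.Second).Unix(), time.Minute, now)) // true
	fmt.Println(heartbeatFresh(now.Add(-2*time.Minute).Unix(), time.Minute, now))  // false
}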
func (d *Desc) Merge(mergeable memberlist.Mergeable, localCAS bool) (memberlist.Mergeable, error) { + return d.mergeWithTime(mergeable, localCAS, time.Now()) +} + +func (d *Desc) mergeWithTime(mergeable memberlist.Mergeable, localCAS bool, now time.Time) (memberlist.Mergeable, error) { if mergeable == nil { return nil, nil } @@ -229,6 +199,10 @@ func (d *Desc) Merge(mergeable memberlist.Mergeable, localCAS bool) (memberlist. // missing, let's mark our ingester as LEFT ting.State = LEFT ting.Tokens = nil + // We are deleting entry "now", and should not keep old timestamp, because there may already be pending + // message in the gossip network with newer timestamp (but still older than "now"). + // Such message would "resurrect" this deleted entry. + ting.Timestamp = now.Unix() thisIngesterMap[name] = ting updated = append(updated, name) @@ -271,8 +245,8 @@ func (d *Desc) MergeContent() []string { // buildNormalizedIngestersMap will do the following: // - sorts tokens and removes duplicates (only within single ingester) // - it doesn't modify input ring -func buildNormalizedIngestersMap(inputRing *Desc) map[string]IngesterDesc { - out := map[string]IngesterDesc{} +func buildNormalizedIngestersMap(inputRing *Desc) map[string]InstanceDesc { + out := map[string]InstanceDesc{} // Make sure LEFT ingesters have no tokens for n, ing := range inputRing.Ingesters { @@ -310,7 +284,7 @@ func buildNormalizedIngestersMap(inputRing *Desc) map[string]IngesterDesc { return out } -func conflictingTokensExist(normalizedIngesters map[string]IngesterDesc) bool { +func conflictingTokensExist(normalizedIngesters map[string]InstanceDesc) bool { count := 0 for _, ing := range normalizedIngesters { count += len(ing.Tokens) @@ -335,7 +309,7 @@ func conflictingTokensExist(normalizedIngesters map[string]IngesterDesc) bool { // 2) otherwise node names are compared, and node with "lower" name wins the token // // Modifies ingesters map with updated tokens. -func resolveConflicts(normalizedIngesters map[string]IngesterDesc) { +func resolveConflicts(normalizedIngesters map[string]InstanceDesc) { size := 0 for _, ing := range normalizedIngesters { size += len(ing.Tokens) @@ -411,46 +385,43 @@ func (d *Desc) RemoveTombstones(limit time.Time) { } } -type TokenDesc struct { - Token uint32 - Ingester string - Zone string -} +func (d *Desc) getTokensInfo() map[uint32]instanceInfo { + out := map[uint32]instanceInfo{} -// getTokens returns sorted list of tokens with ingester IDs, owned by each ingester in the ring. -func (d *Desc) getTokens() []TokenDesc { - numTokens := 0 - for _, ing := range d.Ingesters { - numTokens += len(ing.Tokens) - } - tokens := make([]TokenDesc, 0, numTokens) - for key, ing := range d.Ingesters { - for _, token := range ing.Tokens { - tokens = append(tokens, TokenDesc{Token: token, Ingester: key, Zone: ing.GetZone()}) + for instanceID, instance := range d.Ingesters { + info := instanceInfo{ + InstanceID: instanceID, + Zone: instance.Zone, + } + + for _, token := range instance.Tokens { + out[token] = info } } - sort.Sort(ByToken(tokens)) - return tokens + return out } -// getTokensByZone returns instances tokens grouped by zone. Tokens within each zone -// are guaranteed to be sorted. 
-func (d *Desc) getTokensByZone() map[string][]TokenDesc { - zones := map[string][]TokenDesc{} - - for key, ing := range d.Ingesters { - for _, token := range ing.Tokens { - zones[ing.Zone] = append(zones[ing.Zone], TokenDesc{Token: token, Ingester: key, Zone: ing.GetZone()}) - } +// GetTokens returns sorted list of tokens owned by all instances within the ring. +func (d *Desc) GetTokens() []uint32 { + instances := make([][]uint32, 0, len(d.Ingesters)) + for _, instance := range d.Ingesters { + instances = append(instances, instance.Tokens) } - // Ensure tokens are sorted within each zone. - for zone := range zones { - sort.Sort(ByToken(zones[zone])) + return MergeTokens(instances) +} + +// getTokensByZone returns instances tokens grouped by zone. Tokens within each zone +// are guaranteed to be sorted. +func (d *Desc) getTokensByZone() map[string][]uint32 { + zones := map[string][][]uint32{} + for _, instance := range d.Ingesters { + zones[instance.Zone] = append(zones[instance.Zone], instance.Tokens) } - return zones + // Merge tokens per zone. + return MergeTokensByZone(zones) } type CompareResult int @@ -531,3 +502,79 @@ func GetOrCreateRingDesc(d interface{}) *Desc { } return d.(*Desc) } + +// TokensHeap is a heap data structure used to merge multiple lists +// of sorted tokens into a single one. +type TokensHeap [][]uint32 + +func (h TokensHeap) Len() int { + return len(h) +} + +func (h TokensHeap) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h TokensHeap) Less(i, j int) bool { + return h[i][0] < h[j][0] +} + +func (h *TokensHeap) Push(x interface{}) { + *h = append(*h, x.([]uint32)) +} + +func (h *TokensHeap) Pop() interface{} { + old := *h + n := len(old) + x := old[n-1] + *h = old[0 : n-1] + return x +} + +// MergeTokens takes as input multiple lists of tokens and returns a single list +// containing all tokens merged and sorted. Each input list is required +// to have its tokens already sorted. +func MergeTokens(instances [][]uint32) []uint32 { + numTokens := 0 + + // Build the heap. + h := make(TokensHeap, 0, len(instances)) + for _, tokens := range instances { + if len(tokens) == 0 { + continue + } + + // We can safely append the input slice because elements inside are never shuffled. + h = append(h, tokens) + numTokens += len(tokens) + } + heap.Init(&h) + + out := make([]uint32, 0, numTokens) + + for h.Len() > 0 { + // The minimum element in the tree is the root, at index 0. + lowest := h[0] + out = append(out, lowest[0]) + + if len(lowest) > 1 { + // Remove the first token from the lowest because we popped it + // and then fix the heap to keep it sorted. + h[0] = h[0][1:] + heap.Fix(&h, 0) + } else { + heap.Remove(&h, 0) + } + } + + return out +} + +// MergeTokensByZone is like MergeTokens but does it for each input zone. +func MergeTokensByZone(zones map[string][][]uint32) map[string][]uint32 { + out := make(map[string][]uint32, len(zones)) + for zone, tokens := range zones { + out[zone] = MergeTokens(tokens) + } + return out +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go index adc619e85cb..391773dff15 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set.go @@ -9,7 +9,7 @@ import ( // ReplicationSet describes the ingesters to talk to for a given key, and how // many errors to tolerate. 
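The exported MergeTokens and MergeTokensByZone helpers added to model.go above perform a heap-based k-way merge of already-sorted per-instance token lists. A short usage sketch, assuming the vendored package is importable at its canonical path:

package main

import (
	"fmt"

	"github.com/cortexproject/cortex/pkg/ring"
)

func main() {
	// Each inner slice must already be sorted, as MergeTokens requires.
	perInstance := [][]uint32{
		{10, 40, 90},
		{20, 50},
		{30, 60, 70, 80},
	}
	fmt.Println(ring.MergeTokens(perInstance)) // [10 20 30 40 50 60 70 80 90]

	byZone := map[string][][]uint32{
		"zone-a": {{1, 5}, {3}},
		"zone-b": {{2, 4}},
	}
	fmt.Println(ring.MergeTokensByZone(byZone)) // map[zone-a:[1 3 5] zone-b:[2 4]]
}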
type ReplicationSet struct { - Ingesters []IngesterDesc + Ingesters []InstanceDesc // Maximum number of tolerated failing instances. Max errors and max unavailable zones are // mutually exclusive. @@ -22,11 +22,11 @@ type ReplicationSet struct { // Do function f in parallel for all replicas in the set, erroring if we exceed // MaxErrors and returning early otherwise. -func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(context.Context, *IngesterDesc) (interface{}, error)) ([]interface{}, error) { +func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(context.Context, *InstanceDesc) (interface{}, error)) ([]interface{}, error) { type instanceResult struct { res interface{} err error - instance *IngesterDesc + instance *InstanceDesc } // Initialise the result tracker, which is used to keep track of successes and failures. @@ -46,7 +46,7 @@ func (r ReplicationSet) Do(ctx context.Context, delay time.Duration, f func(cont // Spawn a goroutine for each instance. for i := range r.Ingesters { - go func(i int, ing *IngesterDesc) { + go func(i int, ing *InstanceDesc) { // Wait to send extra requests. Works only when zone-awareness is disabled. if delay > 0 && r.MaxUnavailableZones == 0 && i >= len(r.Ingesters)-r.MaxErrors { after := time.NewTimer(delay) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go index 09f12e3cebb..fcdf5441dd2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_set_tracker.go @@ -3,7 +3,7 @@ package ring type replicationSetResultTracker interface { // Signals an instance has done the execution, either successful (no error) // or failed (with error). - done(instance *IngesterDesc, err error) + done(instance *InstanceDesc, err error) // Returns true if the minimum number of successful results have been received. 
succeeded() bool @@ -19,7 +19,7 @@ type defaultResultTracker struct { maxErrors int } -func newDefaultResultTracker(instances []IngesterDesc, maxErrors int) *defaultResultTracker { +func newDefaultResultTracker(instances []InstanceDesc, maxErrors int) *defaultResultTracker { return &defaultResultTracker{ minSucceeded: len(instances) - maxErrors, numSucceeded: 0, @@ -28,7 +28,7 @@ func newDefaultResultTracker(instances []IngesterDesc, maxErrors int) *defaultRe } } -func (t *defaultResultTracker) done(_ *IngesterDesc, err error) { +func (t *defaultResultTracker) done(_ *InstanceDesc, err error) { if err == nil { t.numSucceeded++ } else { @@ -53,7 +53,7 @@ type zoneAwareResultTracker struct { maxUnavailableZones int } -func newZoneAwareResultTracker(instances []IngesterDesc, maxUnavailableZones int) *zoneAwareResultTracker { +func newZoneAwareResultTracker(instances []InstanceDesc, maxUnavailableZones int) *zoneAwareResultTracker { t := &zoneAwareResultTracker{ waitingByZone: make(map[string]int), failuresByZone: make(map[string]int), @@ -68,7 +68,7 @@ func newZoneAwareResultTracker(instances []IngesterDesc, maxUnavailableZones int return t } -func (t *zoneAwareResultTracker) done(instance *IngesterDesc, err error) { +func (t *zoneAwareResultTracker) done(instance *InstanceDesc, err error) { t.waitingByZone[instance.Zone]-- if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go index 3490853dce4..e572cb77a44 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/replication_strategy.go @@ -3,96 +3,95 @@ package ring import ( "fmt" "time" + + "github.com/pkg/errors" ) type ReplicationStrategy interface { // Filter out unhealthy instances and checks if there're enough instances // for an operation to succeed. Returns an error if there are not enough // instances. - Filter(instances []IngesterDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) (healthy []IngesterDesc, maxFailures int, err error) - - // ShouldExtendReplicaSet returns true if given an instance that's going to be - // added to the replica set, the replica set size should be extended by 1 - // more instance for the given operation. - ShouldExtendReplicaSet(instance IngesterDesc, op Operation) bool + Filter(instances []InstanceDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) (healthy []InstanceDesc, maxFailures int, err error) } -type defaultReplicationStrategy struct { - ExtendWrites bool -} +type defaultReplicationStrategy struct{} -func NewDefaultReplicationStrategy(extendWrites bool) ReplicationStrategy { - return &defaultReplicationStrategy{ - ExtendWrites: extendWrites, - } +func NewDefaultReplicationStrategy() ReplicationStrategy { + return &defaultReplicationStrategy{} } -// Filter decides, given the set of ingesters eligible for a key, -// which ingesters you will try and write to and how many failures you will +// Filter decides, given the set of instances eligible for a key, +// which instances you will try and write to and how many failures you will // tolerate. -// - Filters out dead ingesters so the one doesn't even try to write to them. -// - Checks there is enough ingesters for an operation to succeed. -// The ingesters argument may be overwritten. 
-func (s *defaultReplicationStrategy) Filter(ingesters []IngesterDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) ([]IngesterDesc, int, error) { - // We need a response from a quorum of ingesters, which is n/2 + 1. In the +// - Filters out unhealthy instances so we don't even try to write to them. +// - Checks there are enough instances for an operation to succeed. +// The instances argument may be overwritten. +func (s *defaultReplicationStrategy) Filter(instances []InstanceDesc, op Operation, replicationFactor int, heartbeatTimeout time.Duration, zoneAwarenessEnabled bool) ([]InstanceDesc, int, error) { + // We need a response from a quorum of instances, which is n/2 + 1. In the // case of a node joining/leaving, the actual replica set might be bigger // than the replication factor, so use the bigger of the two. - if len(ingesters) > replicationFactor { - replicationFactor = len(ingesters) + if len(instances) > replicationFactor { + replicationFactor = len(instances) } minSuccess := (replicationFactor / 2) + 1 + now := time.Now() // Skip those that have not heartbeated in a while. NB these are still - // included in the calculation of minSuccess, so if too many failed ingesters + // included in the calculation of minSuccess, so too many failed instances // will cause the whole write to fail. - for i := 0; i < len(ingesters); { - if ingesters[i].IsHealthy(op, heartbeatTimeout) { + for i := 0; i < len(instances); { + if instances[i].IsHealthy(op, heartbeatTimeout, now) { i++ } else { - ingesters = append(ingesters[:i], ingesters[i+1:]...) + instances = append(instances[:i], instances[i+1:]...) } } - // This is just a shortcut - if there are not minSuccess available ingesters, + // This is just a shortcut - if there are not minSuccess available instances, // after filtering out dead ones, don't even bother trying. - if len(ingesters) < minSuccess { + if len(instances) < minSuccess { var err error if zoneAwarenessEnabled { - err = fmt.Errorf("at least %d live replicas required across different availability zones, could only find %d", minSuccess, len(ingesters)) + err = fmt.Errorf("at least %d live replicas required across different availability zones, could only find %d", minSuccess, len(instances)) } else { - err = fmt.Errorf("at least %d live replicas required, could only find %d", minSuccess, len(ingesters)) + err = fmt.Errorf("at least %d live replicas required, could only find %d", minSuccess, len(instances)) } return nil, 0, err } - return ingesters, len(ingesters) - minSuccess, nil + return instances, len(instances) - minSuccess, nil } -func (s *defaultReplicationStrategy) ShouldExtendReplicaSet(ingester IngesterDesc, op Operation) bool { - // We do not want to Write to Ingesters that are not ACTIVE, but we do want - // to write the extra replica somewhere. So we increase the size of the set - // of replicas for the key. This means we have to also increase the - // size of the replica set for read, but we can read from Leaving ingesters, - // so don't skip it in this case. - // NB dead ingester will be filtered later by defaultReplicationStrategy.Filter(). 
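The quorum arithmetic in the rewritten Filter above can be checked in isolation: the effective replication factor grows when the replica set was extended, and the operation needs a strict majority of it. A standalone sketch, not the vendored code:

package main

import "fmt"

// quorum mirrors Filter's arithmetic: bump the effective replication factor
// if the replica set was extended, require a strict majority, and report how
// many of the remaining healthy instances may still fail.
func quorum(replicationFactor, replicaSetSize, healthy int) (maxFailures int, ok bool) {
	if replicaSetSize > replicationFactor {
		replicationFactor = replicaSetSize
	}
	minSuccess := (replicationFactor / 2) + 1
	if healthy < minSuccess {
		return 0, false
	}
	return healthy - minSuccess, true
}

func main() {
	fmt.Println(quorum(3, 3, 3)) // 1 true:  RF=3 tolerates one failure
	fmt.Println(quorum(3, 4, 3)) // 0 true:  extended set raises minSuccess to 3
	fmt.Println(quorum(3, 3, 1)) // 0 false: quorum of 2 is unreachable
}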
- if op == Write { - if s.ExtendWrites { - return ingester.State != ACTIVE +type ignoreUnhealthyInstancesReplicationStrategy struct{} + +func NewIgnoreUnhealthyInstancesReplicationStrategy() ReplicationStrategy { + return &ignoreUnhealthyInstancesReplicationStrategy{} +} + +func (r *ignoreUnhealthyInstancesReplicationStrategy) Filter(instances []InstanceDesc, op Operation, _ int, heartbeatTimeout time.Duration, _ bool) (healthy []InstanceDesc, maxFailures int, err error) { + now := time.Now() + // Filter out unhealthy instances. + for i := 0; i < len(instances); { + if instances[i].IsHealthy(op, heartbeatTimeout, now) { + i++ + } else { + instances = append(instances[:i], instances[i+1:]...) } - return false - } else if op == Read && (ingester.State != ACTIVE && ingester.State != LEAVING) { - return true } - return false + // We need at least 1 healthy instance no matter what the replication factor is set to. + if len(instances) == 0 { + return nil, 0, errors.New("at least 1 healthy replica required, could only find 0") + } + + return instances, len(instances) - 1, nil } -// IsHealthy checks whether an ingester appears to be alive and heartbeating -func (r *Ring) IsHealthy(ingester *IngesterDesc, op Operation) bool { - return ingester.IsHealthy(op, r.cfg.HeartbeatTimeout) +func (r *Ring) IsHealthy(instance *InstanceDesc, op Operation, now time.Time) bool { + return instance.IsHealthy(op, r.cfg.HeartbeatTimeout, now) } // ReplicationFactor of the ring. @@ -100,8 +99,8 @@ func (r *Ring) ReplicationFactor() int { return r.cfg.ReplicationFactor } -// IngesterCount is number of ingesters in the ring -func (r *Ring) IngesterCount() int { +// InstancesCount returns the number of instances in the ring. +func (r *Ring) InstancesCount() int { r.mtx.RLock() c := len(r.ringDesc.Ingesters) r.mtx.RUnlock() diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go index 2cdd18948a5..60e3b6e772e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.go @@ -17,6 +17,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring/kv" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" + util_math "github.com/cortexproject/cortex/pkg/util/math" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -34,20 +36,24 @@ const ( // CompactorRingKey is the key under which we store the compactors ring in the KVStore. CompactorRingKey = "compactor" + + // GetBufferSize is the suggested size of buffers passed to Ring.Get(). It's based on + // a typical replication factor 3, plus extra room for a JOINING + LEAVING instance. + GetBufferSize = 5 ) // ReadRing represents the read interface to the ring. type ReadRing interface { prometheus.Collector - // Get returns n (or more) ingesters which form the replicas for the given key. - // buf is a slice to be overwritten for the return value - // to avoid memory allocation; can be nil. - Get(key uint32, op Operation, buf []IngesterDesc) (ReplicationSet, error) + // Get returns n (or more) instances which form the replicas for the given key. + // bufDescs, bufHosts and bufZones are slices to be overwritten for the return value + // to avoid memory allocation; can be nil, or created with ring.MakeBuffersForGet(). + Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts, bufZones []string) (ReplicationSet, error) // GetAllHealthy returns all healthy instances in the ring, for the given operation. 
// This function doesn't check if the quorum is honored, so doesn't fail if the number - // of unhealthy ingesters is greater than the tolerated max unavailable. + // of unhealthy instances is greater than the tolerated max unavailable. GetAllHealthy(op Operation) (ReplicationSet, error) // GetReplicationSetForOperation returns all instances where the input operation should be executed. @@ -57,7 +63,9 @@ type ReadRing interface { GetReplicationSetForOperation(op Operation) (ReplicationSet, error) ReplicationFactor() int - IngesterCount() int + + // InstancesCount returns the number of instances in the ring. + InstancesCount() int // ShuffleShard returns a subring for the provided identifier (eg. a tenant ID) // and size (number of instances). @@ -71,26 +79,27 @@ type ReadRing interface { HasInstance(instanceID string) bool } -// Operation can be Read or Write -type Operation int - -// Values for Operation -const ( - Read Operation = iota - Write - Reporting // Special value for inquiring about health - - // BlocksSync is the operation run by the store-gateway to sync blocks. - BlocksSync +var ( + // Write operation that also extends replica set, if instance state is not ACTIVE. + Write = NewOp([]IngesterState{ACTIVE}, func(s IngesterState) bool { + // We do not want to Write to instances that are not ACTIVE, but we do want + // to write the extra replica somewhere. So we increase the size of the set + // of replicas for the key. + // NB unhealthy instances will be filtered later by defaultReplicationStrategy.Filter(). + return s != ACTIVE + }) - // BlocksRead is the operation run by the querier to query blocks via the store-gateway. - BlocksRead + // WriteNoExtend is like Write, but with no replicaset extension. + WriteNoExtend = NewOp([]IngesterState{ACTIVE}, nil) - // Ruler is the operation used for distributing rule groups between rulers. - Ruler + Read = NewOp([]IngesterState{ACTIVE, PENDING, LEAVING}, func(s IngesterState) bool { + // To match Write with extended replica set we have to also increase the + // size of the replica set for Read, but we can read from LEAVING ingesters. + return s != ACTIVE && s != LEAVING + }) - // Compactor is the operation used for distributing tenants/blocks across compactors. - Compactor + // Reporting is a special value for inquiring about health. + Reporting = allStatesRingOperation ) var ( @@ -101,9 +110,13 @@ var ( // not registered within the ring. ErrInstanceNotFound = errors.New("instance not found in the ring") - // ErrTooManyFailedIngesters is the error returned when there are too many failed ingesters for a + // ErrTooManyUnhealthyInstances is the error returned when there are too many failed instances for a // specific operation. - ErrTooManyFailedIngesters = errors.New("too many failed ingesters") + ErrTooManyUnhealthyInstances = errors.New("too many unhealthy instances in the ring") + + // ErrInconsistentTokensInfo is the error returned if, due to an internal bug, the mapping between + // a token and its own instance is missing or unknown. + ErrInconsistentTokensInfo = errors.New("inconsistent ring tokens information") ) // Config for a Ring @@ -112,7 +125,10 @@ type Config struct { HeartbeatTimeout time.Duration `yaml:"heartbeat_timeout"` ReplicationFactor int `yaml:"replication_factor"` ZoneAwarenessEnabled bool `yaml:"zone_awareness_enabled"` - ExtendWrites bool `yaml:"extend_writes"` + + // Whether the shuffle-sharding subring cache is disabled. This option is set + // internally and never exposed to the user. 
+ SubringCacheDisabled bool `yaml:"-"` } // RegisterFlags adds the flags required to config this to the given FlagSet with a specified prefix @@ -127,7 +143,11 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.DurationVar(&cfg.HeartbeatTimeout, prefix+"ring.heartbeat-timeout", time.Minute, "The heartbeat timeout after which ingesters are skipped for reads/writes.") f.IntVar(&cfg.ReplicationFactor, prefix+"distributor.replication-factor", 3, "The number of ingesters to write to and read from.") f.BoolVar(&cfg.ZoneAwarenessEnabled, prefix+"distributor.zone-awareness-enabled", false, "True to enable the zone-awareness and replicate ingested samples across different availability zones.") - f.BoolVar(&cfg.ExtendWrites, prefix+"distributor.extend-writes", true, "Try writing to an additional ingester in the presence of an ingester not in the ACTIVE state. It is useful to disable this along with -ingester.unregister-on-shutdown=false in order to not spread samples to extra ingesters during rolling restarts with consistent naming.") +} + +type instanceInfo struct { + InstanceID string + Zone string } // Ring holds the information about the members of the consistent hash ring. @@ -141,8 +161,13 @@ type Ring struct { mtx sync.RWMutex ringDesc *Desc - ringTokens []TokenDesc - ringTokensByZone map[string][]TokenDesc + ringTokens []uint32 + ringTokensByZone map[string][]uint32 + + // Maps a token with the information of the instance holding it. This map is immutable and + // cannot be changed in place because it's shared "as is" between subrings (the only way to + // change it is to create a new one and replace it). + ringInstanceByToken map[uint32]instanceInfo // When did a set of instances change the last time (instance changing state or heartbeat is ignored for this timestamp). lastTopologyChange time.Time @@ -180,7 +205,7 @@ func New(cfg Config, name, key string, reg prometheus.Registerer) (*Ring, error) return nil, err } - return NewWithStoreClientAndStrategy(cfg, name, key, store, NewDefaultReplicationStrategy(cfg.ExtendWrites)) + return NewWithStoreClientAndStrategy(cfg, name, key, store, NewDefaultReplicationStrategy()) } func NewWithStoreClientAndStrategy(cfg Config, name, key string, store kv.Client, strategy ReplicationStrategy) (*Ring, error) { @@ -234,7 +259,7 @@ func NewWithStoreClientAndStrategy(cfg Config, name, key string, store kv.Client func (r *Ring) loop(ctx context.Context) error { r.KVClient.WatchKey(ctx, r.key, func(value interface{}) bool { if value == nil { - level.Info(util.Logger).Log("msg", "ring doesn't exist in KV store yet") + level.Info(log.Logger).Log("msg", "ring doesn't exist in KV store yet") return true } @@ -256,8 +281,9 @@ func (r *Ring) loop(ctx context.Context) error { } now := time.Now() - ringTokens := ringDesc.getTokens() + ringTokens := ringDesc.GetTokens() ringTokensByZone := ringDesc.getTokensByZone() + ringInstanceByToken := ringDesc.getTokensInfo() ringZones := getZones(ringTokensByZone) r.mtx.Lock() @@ -265,6 +291,7 @@ func (r *Ring) loop(ctx context.Context) error { r.ringDesc = ringDesc r.ringTokens = ringTokens r.ringTokensByZone = ringTokensByZone + r.ringInstanceByToken = ringInstanceByToken r.ringZones = ringZones r.lastTopologyChange = now if r.shuffledSubringCache != nil { @@ -276,8 +303,8 @@ func (r *Ring) loop(ctx context.Context) error { return nil } -// Get returns n (or more) ingesters which form the replicas for the given key. 
-func (r *Ring) Get(key uint32, op Operation, buf []IngesterDesc) (ReplicationSet, error) { +// Get returns n (or more) instances which form the replicas for the given key. +func (r *Ring) Get(key uint32, op Operation, bufDescs []InstanceDesc, bufHosts, bufZones []string) (ReplicationSet, error) { r.mtx.RLock() defer r.mtx.RUnlock() if r.ringDesc == nil || len(r.ringTokens) == 0 { @@ -285,51 +312,60 @@ func (r *Ring) Get(key uint32, op Operation, buf []IngesterDesc) (ReplicationSet } var ( - n = r.cfg.ReplicationFactor - ingesters = buf[:0] - distinctHosts = map[string]struct{}{} - distinctZones = map[string]struct{}{} - start = searchToken(r.ringTokens, key) - iterations = 0 + n = r.cfg.ReplicationFactor + instances = bufDescs[:0] + start = searchToken(r.ringTokens, key) + iterations = 0 + + // We use a slice instead of a map because it's faster to search within a + // slice than lookup a map for a very low number of items. + distinctHosts = bufHosts[:0] + distinctZones = bufZones[:0] ) for i := start; len(distinctHosts) < n && iterations < len(r.ringTokens); i++ { iterations++ // Wrap i around in the ring. i %= len(r.ringTokens) - - // We want n *distinct* ingesters && distinct zones. token := r.ringTokens[i] - if _, ok := distinctHosts[token.Ingester]; ok { + + info, ok := r.ringInstanceByToken[token] + if !ok { + // This should never happen unless a bug in the ring code. + return ReplicationSet{}, ErrInconsistentTokensInfo + } + + // We want n *distinct* instances && distinct zones. + if util.StringsContain(distinctHosts, info.InstanceID) { continue } - // Ignore if the ingesters don't have a zone set. - if r.cfg.ZoneAwarenessEnabled && token.Zone != "" { - if _, ok := distinctZones[token.Zone]; ok { + // Ignore if the instances don't have a zone set. + if r.cfg.ZoneAwarenessEnabled && info.Zone != "" { + if util.StringsContain(distinctZones, info.Zone) { continue } - distinctZones[token.Zone] = struct{}{} + distinctZones = append(distinctZones, info.Zone) } - distinctHosts[token.Ingester] = struct{}{} - ingester := r.ringDesc.Ingesters[token.Ingester] + distinctHosts = append(distinctHosts, info.InstanceID) + instance := r.ringDesc.Ingesters[info.InstanceID] // Check whether the replica set should be extended given we're including // this instance. 
- if r.strategy.ShouldExtendReplicaSet(ingester, op) { + if op.ShouldExtendReplicaSetOnState(instance.State) { n++ } - ingesters = append(ingesters, ingester) + instances = append(instances, instance) } - liveIngesters, maxFailure, err := r.strategy.Filter(ingesters, op, r.cfg.ReplicationFactor, r.cfg.HeartbeatTimeout, r.cfg.ZoneAwarenessEnabled) + healthyInstances, maxFailure, err := r.strategy.Filter(instances, op, r.cfg.ReplicationFactor, r.cfg.HeartbeatTimeout, r.cfg.ZoneAwarenessEnabled) if err != nil { return ReplicationSet{}, err } return ReplicationSet{ - Ingesters: liveIngesters, + Ingesters: healthyInstances, MaxErrors: maxFailure, }, nil } @@ -343,15 +379,16 @@ func (r *Ring) GetAllHealthy(op Operation) (ReplicationSet, error) { return ReplicationSet{}, ErrEmptyRing } - ingesters := make([]IngesterDesc, 0, len(r.ringDesc.Ingesters)) - for _, ingester := range r.ringDesc.Ingesters { - if r.IsHealthy(&ingester, op) { - ingesters = append(ingesters, ingester) + now := time.Now() + instances := make([]InstanceDesc, 0, len(r.ringDesc.Ingesters)) + for _, instance := range r.ringDesc.Ingesters { + if r.IsHealthy(&instance, op, now) { + instances = append(instances, instance) } } return ReplicationSet{ - Ingesters: ingesters, + Ingesters: instances, MaxErrors: 0, }, nil } @@ -366,13 +403,15 @@ func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, erro } // Build the initial replication set, excluding unhealthy instances. - healthyInstances := make([]IngesterDesc, 0, len(r.ringDesc.Ingesters)) + healthyInstances := make([]InstanceDesc, 0, len(r.ringDesc.Ingesters)) zoneFailures := make(map[string]struct{}) - for _, ingester := range r.ringDesc.Ingesters { - if r.IsHealthy(&ingester, op) { - healthyInstances = append(healthyInstances, ingester) + now := time.Now() + + for _, instance := range r.ringDesc.Ingesters { + if r.IsHealthy(&instance, op, now) { + healthyInstances = append(healthyInstances, instance) } else { - zoneFailures[ingester.Zone] = struct{}{} + zoneFailures[instance.Zone] = struct{}{} } } @@ -385,24 +424,24 @@ func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, erro // Given data is replicated to RF different zones, we can tolerate a number of // RF/2 failing zones. However, we need to protect from the case the ring currently // contains instances in a number of zones < RF. - numReplicatedZones := util.Min(len(r.ringZones), r.cfg.ReplicationFactor) + numReplicatedZones := util_math.Min(len(r.ringZones), r.cfg.ReplicationFactor) minSuccessZones := (numReplicatedZones / 2) + 1 maxUnavailableZones = minSuccessZones - 1 if len(zoneFailures) > maxUnavailableZones { - return ReplicationSet{}, ErrTooManyFailedIngesters + return ReplicationSet{}, ErrTooManyUnhealthyInstances } if len(zoneFailures) > 0 { // We remove all instances (even healthy ones) from zones with at least - // 1 failing ingester. Due to how replication works when zone-awareness is + // 1 failing instance. Due to how replication works when zone-awareness is // enabled (data is replicated to RF different zones), there's no benefit in // querying healthy instances from "failing zones". A zone is considered // failed if there is single error. 
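The zone-awareness branch of GetReplicationSetForOperation above tolerates losing only a minority of the replicated zones. A standalone sketch of that arithmetic, not the vendored code:

package main

import "fmt"

// maxUnavailableZones mirrors the computation above: data is replicated to
// min(zones, RF) zones, a majority of them must stay healthy, so up to
// minSuccessZones-1 zones may fail.
func maxUnavailableZones(numZones, replicationFactor int) int {
	numReplicatedZones := numZones
	if replicationFactor < numReplicatedZones {
		numReplicatedZones = replicationFactor
	}
	minSuccessZones := (numReplicatedZones / 2) + 1
	return minSuccessZones - 1
}

func main() {
	fmt.Println(maxUnavailableZones(3, 3)) // 1: classic RF=3 across three zones
	fmt.Println(maxUnavailableZones(2, 3)) // 0: fewer zones than RF, none may fail
}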
- filteredInstances := make([]IngesterDesc, 0, len(r.ringDesc.Ingesters)) - for _, ingester := range healthyInstances { - if _, ok := zoneFailures[ingester.Zone]; !ok { - filteredInstances = append(filteredInstances, ingester) + filteredInstances := make([]InstanceDesc, 0, len(r.ringDesc.Ingesters)) + for _, instance := range healthyInstances { + if _, ok := zoneFailures[instance.Zone]; !ok { + filteredInstances = append(filteredInstances, instance) } } @@ -413,7 +452,7 @@ func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, erro // instance, we have to decrease the max unavailable zones accordingly. maxUnavailableZones -= len(zoneFailures) } else { - // Calculate the number of required ingesters; + // Calculate the number of required instances; // ensure we always require at least RF-1 when RF=3. numRequired := len(r.ringDesc.Ingesters) if numRequired < r.cfg.ReplicationFactor { @@ -423,7 +462,7 @@ func (r *Ring) GetReplicationSetForOperation(op Operation) (ReplicationSet, erro numRequired -= r.cfg.ReplicationFactor / 2 if len(healthyInstances) < numRequired { - return ReplicationSet{}, ErrTooManyFailedIngesters + return ReplicationSet{}, ErrTooManyUnhealthyInstances } maxErrors = len(healthyInstances) - numRequired @@ -445,21 +484,28 @@ func (r *Ring) Describe(ch chan<- *prometheus.Desc) { ch <- r.numTokensDesc } -func countTokens(ringDesc *Desc, tokens []TokenDesc) (map[string]uint32, map[string]uint32) { +// countTokens returns the number of tokens and tokens within the range for each instance. +// The ring read lock must be already taken when calling this function. +func (r *Ring) countTokens() (map[string]uint32, map[string]uint32) { owned := map[string]uint32{} numTokens := map[string]uint32{} - for i, token := range tokens { + for i, token := range r.ringTokens { var diff uint32 - if i+1 == len(tokens) { - diff = (math.MaxUint32 - token.Token) + tokens[0].Token + + // Compute how many tokens are within the range. + if i+1 == len(r.ringTokens) { + diff = (math.MaxUint32 - token) + r.ringTokens[0] } else { - diff = tokens[i+1].Token - token.Token + diff = r.ringTokens[i+1] - token } - numTokens[token.Ingester] = numTokens[token.Ingester] + 1 - owned[token.Ingester] = owned[token.Ingester] + diff + + info := r.ringInstanceByToken[token] + numTokens[info.InstanceID] = numTokens[info.InstanceID] + 1 + owned[info.InstanceID] = owned[info.InstanceID] + diff } - for id := range ringDesc.Ingesters { + // Set to 0 the number of owned tokens by instances which don't have tokens yet. 
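The countTokens method above derives each instance's share of the hash space from the gaps between consecutive ring tokens, with the last token wrapping around the 32-bit ring back to the first. A standalone sketch of the same arithmetic:

package main

import (
	"fmt"
	"math"
)

// ownedRanges reproduces the per-token diff from countTokens: each sorted
// token owns the distance to the next one, and the last gap wraps around.
func ownedRanges(sortedTokens []uint32) []uint32 {
	owned := make([]uint32, len(sortedTokens))
	for i, token := range sortedTokens {
		if i+1 == len(sortedTokens) {
			owned[i] = (math.MaxUint32 - token) + sortedTokens[0]
		} else {
			owned[i] = sortedTokens[i+1] - token
		}
	}
	return owned
}

func main() {
	// Four evenly spaced tokens each own roughly a quarter of the ring.
	fmt.Println(ownedRanges([]uint32{0, 1 << 30, 2 << 30, 3 << 30}))
	// [1073741824 1073741824 1073741824 1073741823]
}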
+ for id := range r.ringDesc.Ingesters { if _, ok := owned[id]; !ok { owned[id] = 0 numTokens[id] = 0 @@ -474,7 +520,7 @@ func (r *Ring) Collect(ch chan<- prometheus.Metric) { r.mtx.RLock() defer r.mtx.RUnlock() - numTokens, ownedRange := countTokens(r.ringDesc, r.ringTokens) + numTokens, ownedRange := r.countTokens() for id, totalOwned := range ownedRange { ch <- prometheus.MustNewConstMetric( r.memberOwnershipDesc, @@ -499,14 +545,14 @@ func (r *Ring) Collect(ch chan<- prometheus.Metric) { oldestTimestampByState[s] = 0 } - for _, ingester := range r.ringDesc.Ingesters { - s := ingester.State.String() - if !r.IsHealthy(&ingester, Reporting) { + for _, instance := range r.ringDesc.Ingesters { + s := instance.State.String() + if !r.IsHealthy(&instance, Reporting, time.Now()) { s = unhealthy } numByState[s]++ - if oldestTimestampByState[s] == 0 || ingester.Timestamp < oldestTimestampByState[s] { - oldestTimestampByState[s] = ingester.Timestamp + if oldestTimestampByState[s] == 0 || instance.Timestamp < oldestTimestampByState[s] { + oldestTimestampByState[s] = instance.Timestamp } } @@ -555,7 +601,7 @@ func (r *Ring) Collect(ch chan<- prometheus.Metric) { // set of instances, with a reduced number of overlapping instances between two identifiers. func (r *Ring) ShuffleShard(identifier string, size int) ReadRing { // Nothing to do if the shard size is not smaller than the actual ring. - if size <= 0 || r.IngesterCount() <= size { + if size <= 0 || r.InstancesCount() <= size { return r } @@ -578,7 +624,7 @@ func (r *Ring) ShuffleShard(identifier string, size int) ReadRing { // This function doesn't support caching. func (r *Ring) ShuffleShardWithLookback(identifier string, size int, lookbackPeriod time.Duration, now time.Time) ReadRing { // Nothing to do if the shard size is not smaller than the actual ring. - if size <= 0 || r.IngesterCount() <= size { + if size <= 0 || r.InstancesCount() <= size { return r } @@ -602,11 +648,11 @@ func (r *Ring) shuffleShard(identifier string, size int, lookbackPeriod time.Dur actualZones = []string{""} } - shard := make(map[string]IngesterDesc, size) + shard := make(map[string]InstanceDesc, size) // We need to iterate zones always in the same order to guarantee stability. for _, zone := range actualZones { - var tokens []TokenDesc + var tokens []uint32 if r.cfg.ZoneAwarenessEnabled { tokens = r.ringTokensByZone[zone] @@ -636,21 +682,27 @@ func (r *Ring) shuffleShard(identifier string, size int, lookbackPeriod time.Dur // Wrap p around in the ring. p %= len(tokens) + info, ok := r.ringInstanceByToken[tokens[p]] + if !ok { + // This should never happen unless there's a bug in the ring code. + panic(ErrInconsistentTokensInfo) + } + // Ensure we select a unique instance. - if _, ok := shard[tokens[p].Ingester]; ok { + if _, ok := shard[info.InstanceID]; ok { continue } - instance := r.ringDesc.Ingesters[tokens[p].Ingester] + instanceID := info.InstanceID + instance := r.ringDesc.Ingesters[instanceID] + shard[instanceID] = instance // If the lookback is enabled and this instance has been registered within the lookback period // then we should include it in the subring but continue selecting instances. 
if lookbackPeriod > 0 && instance.RegisteredTimestamp >= lookbackUntil { - shard[tokens[p].Ingester] = instance continue } - shard[tokens[p].Ingester] = instance found = true break } @@ -672,10 +724,15 @@ func (r *Ring) shuffleShard(identifier string, size int, lookbackPeriod time.Dur cfg: r.cfg, strategy: r.strategy, ringDesc: shardDesc, - ringTokens: shardDesc.getTokens(), + ringTokens: shardDesc.GetTokens(), ringTokensByZone: shardTokensByZone, ringZones: getZones(shardTokensByZone), + // We reference the original map as is in order to avoid copying. It's safe to do + // because this map is immutable by design and it's a superset of the actual instances + // within the subring. + ringInstanceByToken: r.ringInstanceByToken, + // For caching to work, remember these values. lastTopologyChange: r.lastTopologyChange, } @@ -707,6 +764,10 @@ func (r *Ring) HasInstance(instanceID string) bool { } func (r *Ring) getCachedShuffledSubring(identifier string, size int) *Ring { + if r.cfg.SubringCacheDisabled { + return nil + } + r.mtx.RLock() defer r.mtx.RUnlock() @@ -731,7 +792,7 @@ func (r *Ring) getCachedShuffledSubring(identifier string, size int) *Ring { } func (r *Ring) setCachedShuffledSubring(identifier string, size int, subring *Ring) { - if subring == nil { + if subring == nil || r.cfg.SubringCacheDisabled { return } @@ -745,3 +806,42 @@ func (r *Ring) setCachedShuffledSubring(identifier string, size int, subring *Ri r.shuffledSubringCache[subringCacheKey{identifier: identifier, shardSize: size}] = subring } } + +// Operation describes which instances can be included in the replica set, based on their state. +// +// Implemented as bitmap, with upper 16-bits used for encoding extendReplicaSet, and lower 16-bits used for encoding healthy states. +type Operation uint32 + +// NewOp constructs new Operation with given "healthy" states for operation, and optional function to extend replica set. +// Result of calling shouldExtendReplicaSet is cached. +func NewOp(healthyStates []IngesterState, shouldExtendReplicaSet func(s IngesterState) bool) Operation { + op := Operation(0) + for _, s := range healthyStates { + op |= (1 << s) + } + + if shouldExtendReplicaSet != nil { + for _, s := range []IngesterState{ACTIVE, LEAVING, PENDING, JOINING, LEAVING, LEFT} { + if shouldExtendReplicaSet(s) { + op |= (0x10000 << s) + } + } + } + + return op +} + +// IsInstanceInStateHealthy is used during "filtering" phase to remove undesired instances based on their state. +func (op Operation) IsInstanceInStateHealthy(s IngesterState) bool { + return op&(1<<s) > 0 +} + +// ShouldExtendReplicaSetOnState returns true if given a state of instance that's going to be +// added to the replica set, the replica set size should be extended by 1 +// more instance for the given operation. +func (op Operation) ShouldExtendReplicaSetOnState(s IngesterState) bool { + return op&(0x10000<<s) > 0 +} + +// All states are healthy, no states extend replica set. 
+var allStatesRingOperation = Operation(0x0000ffff) diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go index 30e1646acf5..7bfadacad7b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.pb.go @@ -60,7 +60,7 @@ func (IngesterState) EnumDescriptor() ([]byte, []int) { } type Desc struct { - Ingesters map[string]IngesterDesc `protobuf:"bytes,1,rep,name=ingesters,proto3" json:"ingesters" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + Ingesters map[string]InstanceDesc `protobuf:"bytes,1,rep,name=ingesters,proto3" json:"ingesters" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` } func (m *Desc) Reset() { *m = Desc{} } @@ -95,14 +95,14 @@ func (m *Desc) XXX_DiscardUnknown() { var xxx_messageInfo_Desc proto.InternalMessageInfo -func (m *Desc) GetIngesters() map[string]IngesterDesc { +func (m *Desc) GetIngesters() map[string]InstanceDesc { if m != nil { return m.Ingesters } return nil } -type IngesterDesc struct { +type InstanceDesc struct { Addr string `protobuf:"bytes,1,opt,name=addr,proto3" json:"addr,omitempty"` // Unix timestamp (with seconds precision) of the last heartbeat sent // by this instance. @@ -126,17 +126,17 @@ type IngesterDesc struct { RegisteredTimestamp int64 `protobuf:"varint,8,opt,name=registered_timestamp,json=registeredTimestamp,proto3" json:"registered_timestamp,omitempty"` } -func (m *IngesterDesc) Reset() { *m = IngesterDesc{} } -func (*IngesterDesc) ProtoMessage() {} -func (*IngesterDesc) Descriptor() ([]byte, []int) { +func (m *InstanceDesc) Reset() { *m = InstanceDesc{} } +func (*InstanceDesc) ProtoMessage() {} +func (*InstanceDesc) Descriptor() ([]byte, []int) { return fileDescriptor_26381ed67e202a6e, []int{1} } -func (m *IngesterDesc) XXX_Unmarshal(b []byte) error { +func (m *InstanceDesc) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } -func (m *IngesterDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { +func (m *InstanceDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { - return xxx_messageInfo_IngesterDesc.Marshal(b, m, deterministic) + return xxx_messageInfo_InstanceDesc.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) @@ -146,54 +146,54 @@ func (m *IngesterDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) return b[:n], nil } } -func (m *IngesterDesc) XXX_Merge(src proto.Message) { - xxx_messageInfo_IngesterDesc.Merge(m, src) +func (m *InstanceDesc) XXX_Merge(src proto.Message) { + xxx_messageInfo_InstanceDesc.Merge(m, src) } -func (m *IngesterDesc) XXX_Size() int { +func (m *InstanceDesc) XXX_Size() int { return m.Size() } -func (m *IngesterDesc) XXX_DiscardUnknown() { - xxx_messageInfo_IngesterDesc.DiscardUnknown(m) +func (m *InstanceDesc) XXX_DiscardUnknown() { + xxx_messageInfo_InstanceDesc.DiscardUnknown(m) } -var xxx_messageInfo_IngesterDesc proto.InternalMessageInfo +var xxx_messageInfo_InstanceDesc proto.InternalMessageInfo -func (m *IngesterDesc) GetAddr() string { +func (m *InstanceDesc) GetAddr() string { if m != nil { return m.Addr } return "" } -func (m *IngesterDesc) GetTimestamp() int64 { +func (m *InstanceDesc) GetTimestamp() int64 { if m != nil { return m.Timestamp } return 0 } -func (m *IngesterDesc) GetState() IngesterState { +func (m *InstanceDesc) GetState() IngesterState { if m != nil { 
return m.State } return ACTIVE } -func (m *IngesterDesc) GetTokens() []uint32 { +func (m *InstanceDesc) GetTokens() []uint32 { if m != nil { return m.Tokens } return nil } -func (m *IngesterDesc) GetZone() string { +func (m *InstanceDesc) GetZone() string { if m != nil { return m.Zone } return "" } -func (m *IngesterDesc) GetRegisteredTimestamp() int64 { +func (m *InstanceDesc) GetRegisteredTimestamp() int64 { if m != nil { return m.RegisteredTimestamp } @@ -203,41 +203,41 @@ func (m *IngesterDesc) GetRegisteredTimestamp() int64 { func init() { proto.RegisterEnum("ring.IngesterState", IngesterState_name, IngesterState_value) proto.RegisterType((*Desc)(nil), "ring.Desc") - proto.RegisterMapType((map[string]IngesterDesc)(nil), "ring.Desc.IngestersEntry") - proto.RegisterType((*IngesterDesc)(nil), "ring.IngesterDesc") + proto.RegisterMapType((map[string]InstanceDesc)(nil), "ring.Desc.IngestersEntry") + proto.RegisterType((*InstanceDesc)(nil), "ring.InstanceDesc") } func init() { proto.RegisterFile("ring.proto", fileDescriptor_26381ed67e202a6e) } var fileDescriptor_26381ed67e202a6e = []byte{ - // 421 bytes of a gzipped FileDescriptorProto + // 427 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x92, 0x31, 0x6f, 0xd3, 0x40, 0x1c, 0xc5, 0xef, 0x1f, 0x5f, 0x5c, 0xe7, 0x1f, 0x5a, 0x59, 0x57, 0x84, 0x4c, 0x85, 0x0e, 0xab, 0x93, 0x41, 0xc2, 0x15, 0x81, 0x01, 0x21, 0x31, 0xb4, 0xd4, 0x20, 0x5b, 0x51, 0xa8, 0x4c, 0xd4, - 0x15, 0x39, 0xcd, 0x61, 0xac, 0x12, 0xbb, 0xb2, 0x2f, 0x48, 0x65, 0xe2, 0x23, 0xf0, 0x05, 0xd8, - 0xf9, 0x28, 0x1d, 0x33, 0xa1, 0x4e, 0x88, 0x38, 0x0b, 0x63, 0x3f, 0x02, 0xba, 0x73, 0x23, 0x93, - 0xed, 0xfd, 0xfc, 0xde, 0xff, 0x3d, 0x0f, 0x87, 0x58, 0x66, 0x79, 0xea, 0x5f, 0x94, 0x85, 0x2c, - 0x18, 0x55, 0x7a, 0xef, 0x49, 0x9a, 0xc9, 0x4f, 0xf3, 0x89, 0x7f, 0x56, 0xcc, 0x0e, 0xd2, 0x22, - 0x2d, 0x0e, 0xb4, 0x39, 0x99, 0x7f, 0xd4, 0xa4, 0x41, 0xab, 0xe6, 0x68, 0xff, 0x07, 0x20, 0x3d, - 0x16, 0xd5, 0x19, 0x7b, 0x85, 0xbd, 0x2c, 0x4f, 0x45, 0x25, 0x45, 0x59, 0x39, 0xe0, 0x1a, 0x5e, - 0x7f, 0x70, 0xdf, 0xd7, 0xed, 0xca, 0xf6, 0xc3, 0xb5, 0x17, 0xe4, 0xb2, 0xbc, 0x3c, 0xa2, 0x57, - 0xbf, 0x1f, 0x92, 0xb8, 0xbd, 0xd8, 0x3b, 0xc1, 0x9d, 0xcd, 0x08, 0xb3, 0xd1, 0x38, 0x17, 0x97, - 0x0e, 0xb8, 0xe0, 0xf5, 0x62, 0x25, 0x99, 0x87, 0xdd, 0x2f, 0xc9, 0xe7, 0xb9, 0x70, 0x3a, 0x2e, - 0x78, 0xfd, 0x01, 0x6b, 0xea, 0xd7, 0x67, 0x6a, 0x26, 0x6e, 0x02, 0x2f, 0x3b, 0x2f, 0x20, 0xa2, - 0x56, 0xc7, 0x36, 0xf6, 0x7f, 0x01, 0xde, 0xf9, 0x3f, 0xc1, 0x18, 0xd2, 0x64, 0x3a, 0x2d, 0x6f, - 0x7b, 0xb5, 0x66, 0x0f, 0xb0, 0x27, 0xb3, 0x99, 0xa8, 0x64, 0x32, 0xbb, 0xd0, 0xe5, 0x46, 0xdc, - 0x7e, 0x60, 0x8f, 0xb0, 0x5b, 0xc9, 0x44, 0x0a, 0xc7, 0x70, 0xc1, 0xdb, 0x19, 0xec, 0x6e, 0xce, - 0xbe, 0x57, 0x56, 0xdc, 0x24, 0xd8, 0x3d, 0x34, 0x65, 0x71, 0x2e, 0xf2, 0xca, 0x31, 0x5d, 0xc3, - 0xdb, 0x8e, 0x6f, 0x49, 0x8d, 0x7e, 0x2d, 0x72, 0xe1, 0x6c, 0x35, 0xa3, 0x4a, 0xb3, 0xa7, 0x78, - 0xb7, 0x14, 0x69, 0xa6, 0x3a, 0xc4, 0xf4, 0x43, 0xbb, 0x6f, 0xe9, 0xfd, 0xdd, 0xd6, 0x1b, 0xaf, - 0xad, 0x88, 0x5a, 0xd4, 0xee, 0x46, 0xd4, 0xea, 0xda, 0xe6, 0xe3, 0x21, 0x6e, 0x6f, 0xfc, 0x02, - 0x43, 0x34, 0x0f, 0x5f, 0x8f, 0xc3, 0xd3, 0xc0, 0x26, 0xac, 0x8f, 0x5b, 0xc3, 0xe0, 0xf0, 0x34, - 0x1c, 0xbd, 0xb5, 0x41, 0xc1, 0x49, 0x30, 0x3a, 0x56, 0xd0, 0x51, 0x10, 0xbd, 0x0b, 0x47, 0x0a, - 0x0c, 0x66, 0x21, 0x1d, 0x06, 0x6f, 0xc6, 0x36, 0x3d, 0x7a, 0xbe, 0x58, 0x72, 0x72, 0xbd, 0xe4, - 0xe4, 0x66, 0xc9, 0xe1, 0x5b, 0xcd, 0xe1, 0x67, 0xcd, 0xe1, 0xaa, 0xe6, 0xb0, 0xa8, 0x39, 0xfc, - 0xa9, 
0x39, 0xfc, 0xad, 0x39, 0xb9, 0xa9, 0x39, 0x7c, 0x5f, 0x71, 0xb2, 0x58, 0x71, 0x72, 0xbd, - 0xe2, 0x64, 0x62, 0xea, 0x37, 0xf0, 0xec, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x56, 0xd8, 0x87, - 0x71, 0x46, 0x02, 0x00, 0x00, + 0x15, 0x39, 0xc9, 0x61, 0xac, 0x12, 0xbb, 0xb2, 0x2f, 0x48, 0x65, 0xe2, 0x23, 0xf0, 0x05, 0xd8, + 0xf9, 0x28, 0x1d, 0x33, 0xa1, 0x4e, 0x88, 0x38, 0x0b, 0x63, 0x3f, 0x02, 0xba, 0x73, 0x23, 0xd3, + 0xed, 0xfd, 0xfc, 0xde, 0xbd, 0xf7, 0x1f, 0x8c, 0x58, 0x66, 0x79, 0xea, 0x9f, 0x97, 0x85, 0x2c, + 0x18, 0x55, 0x7a, 0xef, 0x49, 0x9a, 0xc9, 0x4f, 0x8b, 0x89, 0x3f, 0x2d, 0xe6, 0x07, 0x69, 0x91, + 0x16, 0x07, 0xda, 0x9c, 0x2c, 0x3e, 0x6a, 0xd2, 0xa0, 0x55, 0xf3, 0x68, 0xff, 0x07, 0x20, 0x3d, + 0x16, 0xd5, 0x94, 0xbd, 0xc2, 0x5e, 0x96, 0xa7, 0xa2, 0x92, 0xa2, 0xac, 0x1c, 0x70, 0x0d, 0xaf, + 0x3f, 0xb8, 0xef, 0xeb, 0x76, 0x65, 0xfb, 0xe1, 0xc6, 0x0b, 0x72, 0x59, 0x5e, 0x1c, 0xd1, 0xcb, + 0xdf, 0x0f, 0x49, 0xdc, 0xbe, 0xd8, 0x3b, 0xc1, 0x9d, 0xdb, 0x11, 0x66, 0xa3, 0x71, 0x26, 0x2e, + 0x1c, 0x70, 0xc1, 0xeb, 0xc5, 0x4a, 0x32, 0x0f, 0xbb, 0x5f, 0x92, 0xcf, 0x0b, 0xe1, 0x74, 0x5c, + 0xf0, 0xfa, 0x03, 0xd6, 0xd4, 0x87, 0x79, 0x25, 0x93, 0x7c, 0x2a, 0xd4, 0x4c, 0xdc, 0x04, 0x5e, + 0x76, 0x5e, 0x40, 0x44, 0xad, 0x8e, 0x6d, 0xec, 0xff, 0x02, 0xbc, 0xf3, 0x7f, 0x82, 0x31, 0xa4, + 0xc9, 0x6c, 0x56, 0xde, 0xf4, 0x6a, 0xcd, 0x1e, 0x60, 0x4f, 0x66, 0x73, 0x51, 0xc9, 0x64, 0x7e, + 0xae, 0xcb, 0x8d, 0xb8, 0xfd, 0xc0, 0x1e, 0x61, 0xb7, 0x92, 0x89, 0x14, 0x8e, 0xe1, 0x82, 0xb7, + 0x33, 0xd8, 0xdd, 0xcc, 0x36, 0xd7, 0xbe, 0x57, 0x56, 0xdc, 0x24, 0xd8, 0x3d, 0x34, 0x65, 0x71, + 0x26, 0xf2, 0xca, 0x31, 0x5d, 0xc3, 0xdb, 0x8e, 0x6f, 0x48, 0x8d, 0x7e, 0x2d, 0x72, 0xe1, 0x6c, + 0x35, 0xa3, 0x4a, 0xb3, 0xa7, 0x78, 0xb7, 0x14, 0x69, 0xa6, 0x3a, 0xc4, 0xec, 0x43, 0xbb, 0x6f, + 0xe9, 0xfd, 0xdd, 0xd6, 0x1b, 0x6f, 0xac, 0x88, 0x5a, 0xd4, 0xee, 0x46, 0xd4, 0xea, 0xda, 0xe6, + 0xe3, 0x21, 0x6e, 0xdf, 0x3a, 0x81, 0x21, 0x9a, 0x87, 0xaf, 0xc7, 0xe1, 0x69, 0x60, 0x13, 0xd6, + 0xc7, 0xad, 0x61, 0x70, 0x78, 0x1a, 0x8e, 0xde, 0xda, 0xa0, 0xe0, 0x24, 0x18, 0x1d, 0x2b, 0xe8, + 0x28, 0x88, 0xde, 0x85, 0x23, 0x05, 0x06, 0xb3, 0x90, 0x0e, 0x83, 0x37, 0x63, 0x9b, 0x1e, 0x3d, + 0x5f, 0xae, 0x38, 0xb9, 0x5a, 0x71, 0x72, 0xbd, 0xe2, 0xf0, 0xad, 0xe6, 0xf0, 0xb3, 0xe6, 0x70, + 0x59, 0x73, 0x58, 0xd6, 0x1c, 0xfe, 0xd4, 0x1c, 0xfe, 0xd6, 0x9c, 0x5c, 0xd7, 0x1c, 0xbe, 0xaf, + 0x39, 0x59, 0xae, 0x39, 0xb9, 0x5a, 0x73, 0x32, 0x31, 0xf5, 0x3f, 0xf0, 0xec, 0x5f, 0x00, 0x00, + 0x00, 0xff, 0xff, 0x79, 0x5b, 0xe1, 0x8b, 0x46, 0x02, 0x00, 0x00, } func (x IngesterState) String() string { @@ -278,14 +278,14 @@ func (this *Desc) Equal(that interface{}) bool { } return true } -func (this *IngesterDesc) Equal(that interface{}) bool { +func (this *InstanceDesc) Equal(that interface{}) bool { if that == nil { return this == nil } - that1, ok := that.(*IngesterDesc) + that1, ok := that.(*InstanceDesc) if !ok { - that2, ok := that.(IngesterDesc) + that2, ok := that.(InstanceDesc) if ok { that1 = &that2 } else { @@ -333,7 +333,7 @@ func (this *Desc) GoString() string { keysForIngesters = append(keysForIngesters, k) } github_com_gogo_protobuf_sortkeys.Strings(keysForIngesters) - mapStringForIngesters := "map[string]IngesterDesc{" + mapStringForIngesters := "map[string]InstanceDesc{" for _, k := range keysForIngesters { mapStringForIngesters += fmt.Sprintf("%#v: %#v,", k, this.Ingesters[k]) } @@ -344,12 +344,12 @@ func (this *Desc) GoString() string { s = append(s, "}") return strings.Join(s, "") } -func (this *IngesterDesc) GoString() string { 
+func (this *InstanceDesc) GoString() string { if this == nil { return "nil" } s := make([]string, 0, 10) - s = append(s, "&ring.IngesterDesc{") + s = append(s, "&ring.InstanceDesc{") s = append(s, "Addr: "+fmt.Sprintf("%#v", this.Addr)+",\n") s = append(s, "Timestamp: "+fmt.Sprintf("%#v", this.Timestamp)+",\n") s = append(s, "State: "+fmt.Sprintf("%#v", this.State)+",\n") @@ -414,7 +414,7 @@ func (m *Desc) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *IngesterDesc) Marshal() (dAtA []byte, err error) { +func (m *InstanceDesc) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) @@ -424,12 +424,12 @@ func (m *IngesterDesc) Marshal() (dAtA []byte, err error) { return dAtA[:n], nil } -func (m *IngesterDesc) MarshalTo(dAtA []byte) (int, error) { +func (m *InstanceDesc) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } -func (m *IngesterDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { +func (m *InstanceDesc) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int @@ -513,7 +513,7 @@ func (m *Desc) Size() (n int) { return n } -func (m *IngesterDesc) Size() (n int) { +func (m *InstanceDesc) Size() (n int) { if m == nil { return 0 } @@ -561,7 +561,7 @@ func (this *Desc) String() string { keysForIngesters = append(keysForIngesters, k) } github_com_gogo_protobuf_sortkeys.Strings(keysForIngesters) - mapStringForIngesters := "map[string]IngesterDesc{" + mapStringForIngesters := "map[string]InstanceDesc{" for _, k := range keysForIngesters { mapStringForIngesters += fmt.Sprintf("%v: %v,", k, this.Ingesters[k]) } @@ -572,11 +572,11 @@ func (this *Desc) String() string { }, "") return s } -func (this *IngesterDesc) String() string { +func (this *InstanceDesc) String() string { if this == nil { return "nil" } - s := strings.Join([]string{`&IngesterDesc{`, + s := strings.Join([]string{`&InstanceDesc{`, `Addr:` + fmt.Sprintf("%v", this.Addr) + `,`, `Timestamp:` + fmt.Sprintf("%v", this.Timestamp) + `,`, `State:` + fmt.Sprintf("%v", this.State) + `,`, @@ -654,10 +654,10 @@ func (m *Desc) Unmarshal(dAtA []byte) error { return io.ErrUnexpectedEOF } if m.Ingesters == nil { - m.Ingesters = make(map[string]IngesterDesc) + m.Ingesters = make(map[string]InstanceDesc) } var mapkey string - mapvalue := &IngesterDesc{} + mapvalue := &InstanceDesc{} for iNdEx < postIndex { entryPreIndex := iNdEx var wire uint64 @@ -731,7 +731,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error { if postmsgIndex > l { return io.ErrUnexpectedEOF } - mapvalue = &IngesterDesc{} + mapvalue = &InstanceDesc{} if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { return err } @@ -777,7 +777,7 @@ func (m *Desc) Unmarshal(dAtA []byte) error { } return nil } -func (m *IngesterDesc) Unmarshal(dAtA []byte) error { +func (m *InstanceDesc) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -800,10 +800,10 @@ func (m *IngesterDesc) Unmarshal(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: IngesterDesc: wiretype end group for non-group") + return fmt.Errorf("proto: InstanceDesc: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: IngesterDesc: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: InstanceDesc: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto
index 2adc91a806c..4eab6f733cc 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/ring.proto
@@ -8,11 +8,11 @@ option (gogoproto.marshaler_all) = true;
 option (gogoproto.unmarshaler_all) = true;
 
 message Desc {
-  map<string,IngesterDesc> ingesters = 1 [(gogoproto.nullable) = false];
+  map<string,InstanceDesc> ingesters = 1 [(gogoproto.nullable) = false];
   reserved 2;
 }
 
-message IngesterDesc {
+message InstanceDesc {
   reserved 4, 5; // old, deprecated fields
 
   string addr = 1;
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ring/util.go b/vendor/github.com/cortexproject/cortex/pkg/ring/util.go
index 6f28988eeda..b1cf8210c75 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ring/util.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ring/util.go
@@ -10,7 +10,7 @@ import (
 )
 
 // GenerateTokens make numTokens unique random tokens, none of which clash
-// with takenTokens.
+// with takenTokens. Generated tokens are sorted.
 func GenerateTokens(numTokens int, takenTokens []uint32) []uint32 {
 	if numTokens <= 0 {
 		return []uint32{}
@@ -23,7 +23,7 @@ func GenerateTokens(numTokens int, takenTokens []uint32) []uint32 {
 		used[v] = true
 	}
 
-	tokens := []uint32{}
+	tokens := make([]uint32, 0, numTokens)
 	for i := 0; i < numTokens; {
 		candidate := r.Uint32()
 		if used[candidate] {
@@ -34,6 +34,11 @@ func GenerateTokens(numTokens int, takenTokens []uint32) []uint32 {
 		i++
 	}
 
+	// Ensure returned tokens are sorted.
+	sort.Slice(tokens, func(i, j int) bool {
+		return tokens[i] < tokens[j]
+	})
+
 	return tokens
 }
@@ -116,9 +121,17 @@ func WaitRingStability(ctx context.Context, r *Ring, op Operation, minStability,
 	}
 }
 
+// MakeBuffersForGet returns buffers to use with Ring.Get().
+func MakeBuffersForGet() (bufDescs []InstanceDesc, bufHosts, bufZones []string) {
+	bufDescs = make([]InstanceDesc, 0, GetBufferSize)
+	bufHosts = make([]string, 0, GetBufferSize)
+	bufZones = make([]string, 0, GetBufferSize)
+	return
+}
+
 // getZones return the list zones from the provided tokens. The returned list
 // is guaranteed to be sorted.
-func getZones(tokens map[string][]TokenDesc) []string {
+func getZones(tokens map[string][]uint32) []string {
 	var zones []string
 
 	for zone := range tokens {
@@ -130,9 +143,9 @@
 }
 
 // searchToken returns the offset of the tokens entry holding the range for the provided key.
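The two util.go changes above pair up: GenerateTokens now returns a sorted slice precisely so that searchToken, whose rewrite resumes right after this aside, can binary-search the uint32 tokens with wrap-around. A simplified standalone sketch of the pairing, independent of the vendored package:

package main

import (
	"fmt"
	"math/rand"
	"sort"
)

// sortedTokens mimics the updated GenerateTokens: unique random tokens,
// preallocated to the requested size, returned in ascending order.
func sortedTokens(n int) []uint32 {
	used := make(map[uint32]bool, n)
	tokens := make([]uint32, 0, n)
	for len(tokens) < n {
		c := rand.Uint32()
		if used[c] {
			continue
		}
		used[c] = true
		tokens = append(tokens, c)
	}
	sort.Slice(tokens, func(i, j int) bool { return tokens[i] < tokens[j] })
	return tokens
}

// owner mirrors searchToken: the first token strictly greater than the key
// owns the range, wrapping to index 0 past the last token.
func owner(tokens []uint32, key uint32) int {
	i := sort.Search(len(tokens), func(x int) bool { return tokens[x] > key })
	if i >= len(tokens) {
		i = 0
	}
	return i
}

func main() {
	t := sortedTokens(8)
	fmt.Println(t, "key 42 is owned by token index", owner(t, 42))
}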
-func searchToken(tokens []TokenDesc, key uint32) int { +func searchToken(tokens []uint32, key uint32) int { i := sort.Search(len(tokens), func(x int) bool { - return tokens[x].Token > key + return tokens[x] > key }) if i >= len(tokens) { i = 0 diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go index ac3d959b8c0..5a579d80fcd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/api.go @@ -24,7 +24,7 @@ import ( "github.com/cortexproject/cortex/pkg/ruler/rules" store "github.com/cortexproject/cortex/pkg/ruler/rules" "github.com/cortexproject/cortex/pkg/tenant" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // In order to reimplement the prometheus rules API, a large amount of code was copied over @@ -134,7 +134,7 @@ func NewAPI(r *Ruler, s rules.RuleStore) *API { } func (a *API) PrometheusRules(w http.ResponseWriter, req *http.Request) { - logger := util.WithContext(req.Context(), util.Logger) + logger := util_log.WithContext(req.Context(), util_log.Logger) userID, err := tenant.TenantID(req.Context()) if err != nil || userID == "" { level.Error(logger).Log("msg", "error extracting org id from context", "err", err) @@ -226,7 +226,7 @@ func (a *API) PrometheusRules(w http.ResponseWriter, req *http.Request) { } func (a *API) PrometheusAlerts(w http.ResponseWriter, req *http.Request) { - logger := util.WithContext(req.Context(), util.Logger) + logger := util_log.WithContext(req.Context(), util_log.Logger) userID, err := tenant.TenantID(req.Context()) if err != nil || userID == "" { level.Error(logger).Log("msg", "error extracting org id from context", "err", err) @@ -381,7 +381,7 @@ func parseRequest(req *http.Request, requireNamespace, requireGroup bool) (strin } func (a *API) ListRules(w http.ResponseWriter, req *http.Request) { - logger := util.WithContext(req.Context(), util.Logger) + logger := util_log.WithContext(req.Context(), util_log.Logger) userID, namespace, _, err := parseRequest(req, false, false) if err != nil { @@ -415,7 +415,7 @@ func (a *API) ListRules(w http.ResponseWriter, req *http.Request) { } func (a *API) GetRuleGroup(w http.ResponseWriter, req *http.Request) { - logger := util.WithContext(req.Context(), util.Logger) + logger := util_log.WithContext(req.Context(), util_log.Logger) userID, namespace, groupName, err := parseRequest(req, true, true) if err != nil { respondError(logger, w, err.Error()) @@ -437,7 +437,7 @@ func (a *API) GetRuleGroup(w http.ResponseWriter, req *http.Request) { } func (a *API) CreateRuleGroup(w http.ResponseWriter, req *http.Request) { - logger := util.WithContext(req.Context(), util.Logger) + logger := util_log.WithContext(req.Context(), util_log.Logger) userID, namespace, _, err := parseRequest(req, true, false) if err != nil { respondError(logger, w, err.Error()) @@ -506,7 +506,7 @@ func (a *API) CreateRuleGroup(w http.ResponseWriter, req *http.Request) { } func (a *API) DeleteNamespace(w http.ResponseWriter, req *http.Request) { - logger := util.WithContext(req.Context(), util.Logger) + logger := util_log.WithContext(req.Context(), util_log.Logger) userID, namespace, _, err := parseRequest(req, true, false) if err != nil { @@ -528,7 +528,7 @@ func (a *API) DeleteNamespace(w http.ResponseWriter, req *http.Request) { } func (a *API) DeleteRuleGroup(w http.ResponseWriter, req *http.Request) { - logger := util.WithContext(req.Context(), 
util.Logger)
+	logger := util_log.WithContext(req.Context(), util_log.Logger)
 
 	userID, namespace, groupName, err := parseRequest(req, true, true)
 	if err != nil {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go
index a3f0dd976d0..c8a2be40332 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go
+++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/compat.go
@@ -8,6 +8,7 @@ import (
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/prometheus/notifier"
 	"github.com/prometheus/prometheus/pkg/labels"
+	"github.com/prometheus/prometheus/pkg/value"
 	"github.com/prometheus/prometheus/promql"
 	"github.com/prometheus/prometheus/rules"
 	"github.com/prometheus/prometheus/storage"
@@ -22,15 +23,26 @@ type Pusher interface {
 }
 
 type pusherAppender struct {
-	ctx     context.Context
-	pusher  Pusher
-	labels  []labels.Labels
-	samples []client.Sample
-	userID  string
+	ctx             context.Context
+	pusher          Pusher
+	labels          []labels.Labels
+	samples         []client.Sample
+	userID          string
+	evaluationDelay time.Duration
 }
 
 func (a *pusherAppender) Add(l labels.Labels, t int64, v float64) (uint64, error) {
 	a.labels = append(a.labels, l)
+
+	// Adapt staleness markers for the ruler evaluation delay. The upstream
+	// code stamps a staleness marker with the actual time when a series is
+	// no longer available, which causes 'out of order' append failures once
+	// the series becomes available again.
+	// see https://github.com/prometheus/prometheus/blob/6c56a1faaaad07317ff585bda75b99bdba0517ad/rules/manager.go#L647-L660
+	if a.evaluationDelay > 0 && value.IsStaleNaN(v) {
+		t -= a.evaluationDelay.Milliseconds()
+	}
+
 	a.samples = append(a.samples, client.Sample{
 		TimestampMs: t,
 		Value:       v,
@@ -59,16 +71,18 @@ func (a *pusherAppender) Rollback() error {
 
 // PusherAppendable fulfills the storage.Appendable interface for prometheus manager
 type PusherAppendable struct {
-	pusher Pusher
-	userID string
+	pusher      Pusher
+	userID      string
+	rulesLimits RulesLimits
 }
 
 // Appender returns a storage.Appender
 func (t *PusherAppendable) Appender(ctx context.Context) storage.Appender {
 	return &pusherAppender{
-		ctx:    ctx,
-		pusher: t.pusher,
-		userID: t.userID,
+		ctx:             ctx,
+		pusher:          t.pusher,
+		userID:          t.userID,
+		evaluationDelay: t.rulesLimits.EvaluationDelay(t.userID),
 	}
 }
@@ -113,7 +127,7 @@ type ManagerFactory func(ctx context.Context, userID string, notifier *notifier.
func DefaultTenantManagerFactory(cfg Config, p Pusher, q storage.Queryable, engine *promql.Engine, overrides RulesLimits) ManagerFactory { return func(ctx context.Context, userID string, notifier *notifier.Manager, logger log.Logger, reg prometheus.Registerer) RulesManager { return rules.NewManager(&rules.ManagerOptions{ - Appendable: &PusherAppendable{pusher: p, userID: userID}, + Appendable: &PusherAppendable{pusher: p, userID: userID, rulesLimits: overrides}, Queryable: q, QueryFunc: engineQueryFunc(engine, q, overrides, userID), Context: user.InjectOrgID(ctx, userID), diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go index 454e1290e23..62f83fa8313 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/lifecycle.go @@ -4,7 +4,7 @@ import ( "github.com/cortexproject/cortex/pkg/ring" ) -func (r *Ruler) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.IngesterDesc) (ring.IngesterState, ring.Tokens) { +func (r *Ruler) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.IngesterState, ring.Tokens) { // When we initialize the ruler instance in the ring we want to start from // a clean situation, so whatever is the state we set it ACTIVE, while we keep existing // tokens (if any). @@ -13,7 +13,7 @@ func (r *Ruler) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.De tokens = instanceDesc.GetTokens() } - _, takenTokens := ringDesc.TokensFor(instanceID) + takenTokens := ringDesc.GetTokens() newTokens := ring.GenerateTokens(r.cfg.Ring.NumTokens-len(tokens), takenTokens) // Tokens sorting will be enforced by the parent caller. @@ -24,5 +24,5 @@ func (r *Ruler) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.De func (r *Ruler) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {} func (r *Ruler) OnRingInstanceStopping(_ *ring.BasicLifecycler) {} -func (r *Ruler) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.IngesterDesc) { +func (r *Ruler) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) { } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go index 5057215e014..b2a1a0e5518 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager.go @@ -171,8 +171,7 @@ func (r *DefaultMultiTenantManager) newManager(ctx context.Context, userID strin reg := prometheus.NewRegistry() r.userManagerMetrics.AddUserRegistry(userID, reg) - logger := log.With(r.logger, "user", userID) - return r.managerFactory(ctx, userID, notifier, logger, reg), nil + return r.managerFactory(ctx, userID, notifier, r.logger, reg), nil } func (r *DefaultMultiTenantManager) getOrCreateNotifier(userID string) (*notifier.Manager, error) { @@ -206,7 +205,7 @@ func (r *DefaultMultiTenantManager) getOrCreateNotifier(userID string) (*notifie }, }, log.With(r.logger, "user", userID)) - go n.run() + n.run() // This should never fail, unless there's a programming mistake. 
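Looping back to the compat.go staleness change above before the notifier hunk continues: Prometheus marks a vanished series with a special NaN, and the ruler now rewinds that marker by the per-tenant evaluation delay so the next real sample is not rejected as out of order. A simplified sketch; the staleNaN constant matches the bit pattern of prometheus/pkg/value.StaleNaN, everything else is illustrative:

package main

import (
	"fmt"
	"math"
	"time"
)

// staleNaN is the bit pattern Prometheus uses to mark a stale series
// (the same value as prometheus/pkg/value.StaleNaN).
const staleNaN = 0x7ff0000000000002

func isStaleNaN(v float64) bool {
	return math.Float64bits(v) == staleNaN
}

// adjust mirrors pusherAppender.Add: staleness markers are shifted back by
// the evaluation delay so that a real sample appended for the delayed
// evaluation time is not rejected as out of order.
func adjust(tMs int64, v float64, evaluationDelay time.Duration) int64 {
	if evaluationDelay > 0 && isStaleNaN(v) {
		tMs -= evaluationDelay.Milliseconds()
	}
	return tMs
}

func main() {
	marker := math.Float64frombits(staleNaN)
	fmt.Println(adjust(1_000_000, marker, time.Minute)) // 940000: marker rewound
	fmt.Println(adjust(1_000_000, 1.5, time.Minute))    // 1000000: real samples unchanged
}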
if err := n.applyConfig(r.notifierCfg); err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go index c14c966af3d..20277448086 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/manager_metrics.go @@ -11,16 +11,17 @@ import ( type ManagerMetrics struct { regs *util.UserRegistries - EvalDuration *prometheus.Desc - IterationDuration *prometheus.Desc - IterationsMissed *prometheus.Desc - IterationsScheduled *prometheus.Desc - EvalTotal *prometheus.Desc - EvalFailures *prometheus.Desc - GroupInterval *prometheus.Desc - GroupLastEvalTime *prometheus.Desc - GroupLastDuration *prometheus.Desc - GroupRules *prometheus.Desc + EvalDuration *prometheus.Desc + IterationDuration *prometheus.Desc + IterationsMissed *prometheus.Desc + IterationsScheduled *prometheus.Desc + EvalTotal *prometheus.Desc + EvalFailures *prometheus.Desc + GroupInterval *prometheus.Desc + GroupLastEvalTime *prometheus.Desc + GroupLastDuration *prometheus.Desc + GroupRules *prometheus.Desc + GroupLastEvalSamples *prometheus.Desc } // NewManagerMetrics returns a ManagerMetrics struct @@ -88,6 +89,12 @@ func NewManagerMetrics() *ManagerMetrics { []string{"user", "rule_group"}, nil, ), + GroupLastEvalSamples: prometheus.NewDesc( + "cortex_prometheus_last_evaluation_samples", + "The number of samples returned during the last rule group evaluation.", + []string{"user", "rule_group"}, + nil, + ), } } @@ -113,6 +120,7 @@ func (m *ManagerMetrics) Describe(out chan<- *prometheus.Desc) { out <- m.GroupLastEvalTime out <- m.GroupLastDuration out <- m.GroupRules + out <- m.GroupLastEvalSamples } // Collect implements the Collector interface @@ -135,4 +143,5 @@ func (m *ManagerMetrics) Collect(out chan<- prometheus.Metric) { data.SendSumOfGaugesPerUserWithLabels(out, m.GroupLastEvalTime, "prometheus_rule_group_last_evaluation_timestamp_seconds", "rule_group") data.SendSumOfGaugesPerUserWithLabels(out, m.GroupLastDuration, "prometheus_rule_group_last_duration_seconds", "rule_group") data.SendSumOfGaugesPerUserWithLabels(out, m.GroupRules, "prometheus_rule_group_rules", "rule_group") + data.SendSumOfGaugesPerUserWithLabels(out, m.GroupLastEvalSamples, "prometheus_rule_group_last_evaluation_samples", "rule_group") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go index 746e4e8ba60..5f0f6092571 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/notifier.go @@ -2,6 +2,7 @@ package ruler import ( "context" + "flag" "fmt" "net/url" "regexp" @@ -16,8 +17,21 @@ import ( "github.com/prometheus/prometheus/discovery" "github.com/prometheus/prometheus/discovery/dns" "github.com/prometheus/prometheus/notifier" + + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/tls" ) +type NotifierConfig struct { + TLS tls.ClientConfig `yaml:",inline"` + BasicAuth util.BasicAuth `yaml:",inline"` +} + +func (cfg *NotifierConfig) RegisterFlags(f *flag.FlagSet) { + cfg.TLS.RegisterFlagsWithPrefix("ruler.alertmanager-client", f) + cfg.BasicAuth.RegisterFlagsWithPrefix("ruler.alertmanager-client.", f) +} + // rulerNotifier bundles a notifier.Manager together with an associated // Alertmanager service discovery manager and handles the lifecycle // of both actors. 
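The NotifierConfig added above follows the prefix-based flag registration pattern used throughout this patch: each embedded config registers its own flags under a caller-supplied prefix. A small sketch of the pattern with a hypothetical single-field config, not the vendored tls.ClientConfig:

package main

import (
	"flag"
	"fmt"
)

// ClientConfig is a stand-in for the vendored TLS client config; only the
// prefixing pattern matters here.
type ClientConfig struct {
	CAPath string
}

func (c *ClientConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) {
	f.StringVar(&c.CAPath, prefix+".tls-ca-path", "", "Path to the CA certificates file.")
}

func main() {
	fs := flag.NewFlagSet("ruler", flag.ExitOnError)
	var cfg ClientConfig
	// The NotifierConfig above passes the "ruler.alertmanager-client" prefix,
	// yielding flags such as -ruler.alertmanager-client.tls-ca-path.
	cfg.RegisterFlagsWithPrefix("ruler.alertmanager-client", fs)
	_ = fs.Parse([]string{"-ruler.alertmanager-client.tls-ca-path", "/etc/ssl/ca.pem"})
	fmt.Println(cfg.CAPath)
}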
@@ -39,6 +53,7 @@ func newRulerNotifier(o *notifier.Options, l gklog.Logger) *rulerNotifier { } } +// run starts the notifier. This function doesn't block and returns immediately. func (rn *rulerNotifier) run() { rn.wg.Add(2) go func() { @@ -149,13 +164,21 @@ func amConfigFromURL(rulerConfig *Config, url *url.URL, apiVersion config.Alertm PathPrefix: url.Path, Timeout: model.Duration(rulerConfig.NotificationTimeout), ServiceDiscoveryConfigs: sdConfig, + HTTPClientConfig: config_util.HTTPClientConfig{ + TLSConfig: config_util.TLSConfig{ + CAFile: rulerConfig.Notifier.TLS.CAPath, + CertFile: rulerConfig.Notifier.TLS.CertPath, + KeyFile: rulerConfig.Notifier.TLS.KeyPath, + InsecureSkipVerify: rulerConfig.Notifier.TLS.InsecureSkipVerify, + ServerName: rulerConfig.Notifier.TLS.ServerName, + }, + }, } + // Check the URL for basic authentication information first if url.User != nil { - amConfig.HTTPClientConfig = config_util.HTTPClientConfig{ - BasicAuth: &config_util.BasicAuth{ - Username: url.User.Username(), - }, + amConfig.HTTPClientConfig.BasicAuth = &config_util.BasicAuth{ + Username: url.User.Username(), } if password, isSet := url.User.Password(); isSet { @@ -163,5 +186,13 @@ func amConfigFromURL(rulerConfig *Config, url *url.URL, apiVersion config.Alertm } } + // Override URL basic authentication configs with hard coded config values if present + if rulerConfig.Notifier.BasicAuth.IsEnabled() { + amConfig.HTTPClientConfig.BasicAuth = &config_util.BasicAuth{ + Username: rulerConfig.Notifier.BasicAuth.Username, + Password: config_util.Secret(rulerConfig.Notifier.BasicAuth.Password), + } + } + return amConfig } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go index 6d8c6ab5bfc..6b4433d3ace 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler.go @@ -64,12 +64,9 @@ type Config struct { // This is used for template expansion in alerts; must be a valid URL. ExternalURL flagext.URLValue `yaml:"external_url"` // GRPC Client configuration. - ClientTLSConfig grpcclient.ConfigWithTLS `yaml:"ruler_client"` + ClientTLSConfig grpcclient.Config `yaml:"ruler_client"` // How frequently to evaluate rules by default. EvaluationInterval time.Duration `yaml:"evaluation_interval"` - // Deprecated. Replaced with pkg/util/validation/Limits.RulerEvaluationDelay field. - // TODO: To be removed in Cortex 1.6. - EvaluationDelay time.Duration `yaml:"evaluation_delay_duration"` // How frequently to poll for updated rules. PollInterval time.Duration `yaml:"poll_interval"` // Rule Storage and Polling configuration. @@ -89,6 +86,8 @@ type Config struct { NotificationQueueCapacity int `yaml:"notification_queue_capacity"` // HTTP timeout duration when sending notifications to the Alertmanager. NotificationTimeout time.Duration `yaml:"notification_timeout"` + // Client configs for interacting with the Alertmanager + Notifier NotifierConfig `yaml:"alertmanager_client"` // Max time to tolerate outage for restoring "for" state of alert. 
OutageTolerance time.Duration `yaml:"for_outage_tolerance"` @@ -133,6 +132,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.ClientTLSConfig.RegisterFlagsWithPrefix("ruler.client", f) cfg.StoreConfig.RegisterFlags(f) cfg.Ring.RegisterFlags(f) + cfg.Notifier.RegisterFlags(f) // Deprecated Flags that will be maintained to avoid user disruption flagext.DeprecatedFlag(f, "ruler.client-timeout", "This flag has been renamed to ruler.configs.client-timeout") @@ -142,7 +142,6 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { cfg.ExternalURL.URL, _ = url.Parse("") // Must be non-nil f.Var(&cfg.ExternalURL, "ruler.external.url", "URL of alerts return path.") f.DurationVar(&cfg.EvaluationInterval, "ruler.evaluation-interval", 1*time.Minute, "How frequently to evaluate rules") - f.DurationVar(&cfg.EvaluationDelay, "ruler.evaluation-delay-duration-deprecated", 0, "Deprecated. Please use -ruler.evaluation-delay-duration instead.") f.DurationVar(&cfg.PollInterval, "ruler.poll-interval", 1*time.Minute, "How frequently to poll for rule changes") f.StringVar(&cfg.AlertmanagerURL, "ruler.alertmanager-url", "", "Comma-separated list of URL(s) of the Alertmanager(s) to send notifications to. Each Alertmanager URL is treated as a separate group in the configuration. Multiple Alertmanagers in HA per group can be supported by using DNS resolution via -ruler.alertmanager-discovery.") @@ -284,7 +283,7 @@ func enableSharding(r *Ruler, ringStore kv.Client) error { return errors.Wrap(err, "failed to initialize ruler's lifecycler") } - r.ring, err = ring.NewWithStoreClientAndStrategy(r.cfg.Ring.ToRingConfig(), rulerRingName, ring.RulerRingKey, ringStore, rulerReplicationStrategy{}) + r.ring, err = ring.NewWithStoreClientAndStrategy(r.cfg.Ring.ToRingConfig(), rulerRingName, ring.RulerRingKey, ringStore, ring.NewIgnoreUnhealthyInstancesReplicationStrategy()) if err != nil { return errors.Wrap(err, "failed to initialize ruler's ring") } @@ -368,7 +367,7 @@ func tokenForGroup(g *store.RuleGroupDesc) uint32 { func instanceOwnsRuleGroup(r ring.ReadRing, g *rules.RuleGroupDesc, instanceAddr string) (bool, error) { hash := tokenForGroup(g) - rlrs, err := r.Get(hash, ring.Ruler, []ring.IngesterDesc{}) + rlrs, err := r.Get(hash, RingOp, nil, nil, nil) if err != nil { return false, errors.Wrap(err, "error reading ring to verify rule group ownership") } @@ -410,7 +409,7 @@ func (r *Ruler) run(ctx context.Context) error { var ringLastState ring.ReplicationSet if r.cfg.EnableSharding { - ringLastState, _ = r.ring.GetAllHealthy(ring.Ruler) + ringLastState, _ = r.ring.GetAllHealthy(RingOp) ringTicker := time.NewTicker(util.DurationWithJitter(r.cfg.RingCheckPeriod, 0.2)) defer ringTicker.Stop() ringTickerChan = ringTicker.C @@ -426,7 +425,7 @@ func (r *Ruler) run(ctx context.Context) error { case <-ringTickerChan: // We ignore the error because in case of error it will return an empty // replication set which we use to compare with the previous state. 
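For context on the ring.Get call above (the run-loop hunk continues below): each rule group hashes to a token, and with a replication factor of 1 exactly one ACTIVE ruler owns it. A standalone sketch of that ownership lookup, with a simplified FNV hash standing in for the vendored tokenForGroup:

package main

import (
	"fmt"
	"hash/fnv"
	"sort"
)

// tokenFor loosely mimics tokenForGroup: hash the (user, namespace, group)
// identity of a rule group down to a 32-bit ring token.
func tokenFor(user, namespace, group string) uint32 {
	h := fnv.New32a()
	h.Write([]byte(user))
	h.Write([]byte(namespace))
	h.Write([]byte(group))
	return h.Sum32()
}

type instance struct {
	addr  string
	token uint32
}

// ownerOf walks sorted instance tokens; the first token greater than the
// key owns the rule group, wrapping back to the first instance.
func ownerOf(instances []instance, key uint32) string {
	i := sort.Search(len(instances), func(x int) bool { return instances[x].token > key })
	if i >= len(instances) {
		i = 0
	}
	return instances[i].addr
}

func main() {
	rulers := []instance{{"ruler-0:9095", 1 << 30}, {"ruler-1:9095", 2 << 30}, {"ruler-2:9095", 3 << 30}}
	key := tokenFor("tenant-a", "default", "alerts")
	fmt.Println("group token", key, "is owned by", ownerOf(rulers, key))
}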
- currRingState, _ := r.ring.GetAllHealthy(ring.Ruler) + currRingState, _ := r.ring.GetAllHealthy(RingOp) if ring.HasReplicationSetChanged(ringLastState, currRingState) { ringLastState = currRingState @@ -570,7 +569,7 @@ func filterRuleGroups(userID string, ruleGroups []*store.RuleGroupDesc, ring rin owned, err := instanceOwnsRuleGroup(ring, g, instanceAddr) if err != nil { ringCheckErrors.Inc() - level.Error(log).Log("msg", "failed to create group for user", "user", userID, "namespace", g.Namespace, "group", g.Name, "err", err) + level.Error(log).Log("msg", "failed to check if the ruler replica owns the rule group", "user", userID, "namespace", g.Namespace, "group", g.Name, "err", err) continue } @@ -688,7 +687,7 @@ func (r *Ruler) getLocalRules(userID string) ([]*GroupStateDesc, error) { } func (r *Ruler) getShardedRules(ctx context.Context) ([]*GroupStateDesc, error) { - rulers, err := r.ring.GetReplicationSetForOperation(ring.Ruler) + rulers, err := r.ring.GetReplicationSetForOperation(RingOp) if err != nil { return nil, err } diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_replication_strategy.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_replication_strategy.go deleted file mode 100644 index 0d16572fef1..00000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_replication_strategy.go +++ /dev/null @@ -1,37 +0,0 @@ -package ruler - -import ( - "time" - - "github.com/pkg/errors" - - "github.com/cortexproject/cortex/pkg/ring" -) - -type rulerReplicationStrategy struct { -} - -func (r rulerReplicationStrategy) Filter(instances []ring.IngesterDesc, op ring.Operation, _ int, heartbeatTimeout time.Duration, _ bool) (healthy []ring.IngesterDesc, maxFailures int, err error) { - // Filter out unhealthy instances. - for i := 0; i < len(instances); { - if instances[i].IsHealthy(op, heartbeatTimeout) { - i++ - } else { - instances = append(instances[:i], instances[i+1:]...) - } - } - - if len(instances) == 0 { - return nil, 0, errors.New("no healthy ruler instance found for the replication set") - } - - return instances, len(instances) - 1, nil -} - -func (r rulerReplicationStrategy) ShouldExtendReplicaSet(instance ring.IngesterDesc, op ring.Operation) bool { - // Only ACTIVE rulers get any rule groups. If instance is not ACTIVE, we need to find another ruler. - if op == ring.Ruler && instance.GetState() != ring.ACTIVE { - return true - } - return false -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go index 3cab30bc4a2..2ea58e7cfbd 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/ruler_ring.go @@ -10,8 +10,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const ( @@ -21,6 +21,12 @@ const ( ringAutoForgetUnhealthyPeriods = 2 ) +// RingOp is the operation used for distributing rule groups between rulers. +var RingOp = ring.NewOp([]ring.IngesterState{ring.ACTIVE}, func(s ring.IngesterState) bool { + // Only ACTIVE rulers get any rule groups. If instance is not ACTIVE, we need to find another ruler. + return s != ring.ACTIVE +}) + // RingConfig masks the ring lifecycler config which contains // many options not really required by the rulers ring. 
This config // is used to strip down the config to the minimum, and avoid confusion @@ -48,7 +54,7 @@ type RingConfig struct { func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { hostname, err := os.Hostname() if err != nil { - level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err) os.Exit(1) } @@ -63,7 +69,7 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { f.StringVar(&cfg.InstanceAddr, "ruler.ring.instance-addr", "", "IP address to advertise in the ring.") f.IntVar(&cfg.InstancePort, "ruler.ring.instance-port", 0, "Port to advertise in the ring (defaults to server.grpc-listen-port).") f.StringVar(&cfg.InstanceID, "ruler.ring.instance-id", hostname, "Instance ID to register in the ring.") - f.IntVar(&cfg.NumTokens, "ruler.ring.num-tokens", 128, "Number of tokens for each ingester.") + f.IntVar(&cfg.NumTokens, "ruler.ring.num-tokens", 128, "Number of tokens for each ruler.") } // ToLifecyclerConfig returns a LifecyclerConfig based on the ruler @@ -91,6 +97,7 @@ func (cfg *RingConfig) ToRingConfig() ring.Config { rc.KVStore = cfg.KVStore rc.HeartbeatTimeout = cfg.HeartbeatTimeout + rc.SubringCacheDisabled = true // Each rule group is loaded to *exactly* one ruler. rc.ReplicationFactor = 1 diff --git a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go index 1fb7d65586c..344c6d8f029 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go +++ b/vendor/github.com/cortexproject/cortex/pkg/ruler/rules/objectclient/rule_store.go @@ -14,7 +14,7 @@ import ( "github.com/cortexproject/cortex/pkg/chunk" "github.com/cortexproject/cortex/pkg/ruler/rules" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // Object Rule Storage Schema @@ -49,7 +49,7 @@ func NewRuleStore(client chunk.ObjectClient, loadConcurrency int) *RuleStore { func (o *RuleStore) getRuleGroup(ctx context.Context, objectKey string, rg *rules.RuleGroupDesc) (*rules.RuleGroupDesc, error) { reader, err := o.client.GetObject(ctx, objectKey) if err == chunk.ErrStorageObjectNotFound { - level.Debug(util.Logger).Log("msg", "rule group does not exist", "name", objectKey) + level.Debug(util_log.Logger).Log("msg", "rule group does not exist", "name", objectKey) return nil, rules.ErrGroupNotFound } @@ -139,10 +139,10 @@ func (o *RuleStore) LoadRuleGroups(ctx context.Context, groupsToLoad map[string] key := generateRuleObjectKey(user, namespace, group) - level.Debug(util.Logger).Log("msg", "loading rule group", "key", key, "user", user) + level.Debug(util_log.Logger).Log("msg", "loading rule group", "key", key, "user", user) gr, err := o.getRuleGroup(gCtx, key, gr) // reuse group pointer from the map. 
if err != nil { - level.Error(util.Logger).Log("msg", "failed to get rule group", "key", key, "user", user) + level.Error(util_log.Logger).Log("msg", "failed to get rule group", "key", key, "user", user) return err } @@ -227,10 +227,10 @@ func (o *RuleStore) DeleteNamespace(ctx context.Context, userID, namespace strin } for _, obj := range ruleGroupObjects { - level.Debug(util.Logger).Log("msg", "deleting rule group", "namespace", namespace, "key", obj.Key) + level.Debug(util_log.Logger).Log("msg", "deleting rule group", "namespace", namespace, "key", obj.Key) err = o.client.DeleteObject(ctx, obj.Key) if err != nil { - level.Error(util.Logger).Log("msg", "unable to delete rule group from namespace", "err", err, "namespace", namespace, "key", obj.Key) + level.Error(util_log.Logger).Log("msg", "unable to delete rule group from namespace", "err", err, "namespace", namespace, "key", obj.Key) return err } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/scheduler/scheduler.go b/vendor/github.com/cortexproject/cortex/pkg/scheduler/scheduler.go index 47aed9561e1..7b18f5f3a51 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/scheduler/scheduler.go +++ b/vendor/github.com/cortexproject/cortex/pkg/scheduler/scheduler.go @@ -23,9 +23,11 @@ import ( "github.com/cortexproject/cortex/pkg/frontend/v2/frontendv2pb" "github.com/cortexproject/cortex/pkg/scheduler/queue" "github.com/cortexproject/cortex/pkg/scheduler/schedulerpb" + "github.com/cortexproject/cortex/pkg/tenant" "github.com/cortexproject/cortex/pkg/util/grpcclient" "github.com/cortexproject/cortex/pkg/util/grpcutil" "github.com/cortexproject/cortex/pkg/util/services" + "github.com/cortexproject/cortex/pkg/util/validation" ) var ( @@ -72,7 +74,7 @@ type connectedFrontend struct { type Config struct { MaxOutstandingPerTenant int `yaml:"max_outstanding_requests_per_tenant"` - GRPCClientConfig grpcclient.ConfigWithTLS `yaml:"grpc_client_config" doc:"description=This configures the gRPC client used to report errors back to the query-frontend."` + GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config" doc:"description=This configures the gRPC client used to report errors back to the query-frontend."` } func (cfg *Config) RegisterFlags(f *flag.FlagSet) { @@ -126,6 +128,7 @@ type schedulerRequest struct { userID string queryID uint64 request *httpgrpc.HTTPRequest + statsEnabled bool enqueueTime time.Time @@ -264,6 +267,7 @@ func (s *Scheduler) enqueueRequest(frontendContext context.Context, frontendAddr userID: msg.UserID, queryID: msg.QueryID, request: msg.HttpRequest, + statsEnabled: msg.StatsEnabled, } req.parentSpanContext = parentSpanContext @@ -271,7 +275,12 @@ func (s *Scheduler) enqueueRequest(frontendContext context.Context, frontendAddr req.enqueueTime = time.Now() req.ctxCancel = cancel - maxQueriers := s.limits.MaxQueriersPerUser(userID) + // aggregate the max queriers limit in the case of a multi tenant query + tenantIDs, err := tenant.TenantIDsFromOrgID(userID) + if err != nil { + return err + } + maxQueriers := validation.SmallestPositiveNonZeroIntPerTenant(tenantIDs, s.limits.MaxQueriersPerUser) return s.requestQueue.EnqueueRequest(userID, req, maxQueriers, func() { shouldCancel = false @@ -371,6 +380,7 @@ func (s *Scheduler) forwardRequestToQuerier(querier schedulerpb.SchedulerForQuer QueryID: req.queryID, FrontendAddress: req.frontendAddress, HttpRequest: req.request, + StatsEnabled: req.statsEnabled, }) if err != nil { errCh <- err diff --git 
a/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.pb.go b/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.pb.go index a2698e5749a..44f95e88408 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.pb.go +++ b/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.pb.go @@ -136,6 +136,9 @@ type SchedulerToQuerier struct { FrontendAddress string `protobuf:"bytes,3,opt,name=frontendAddress,proto3" json:"frontendAddress,omitempty"` // User who initiated the request. Needed to send reply back to frontend. UserID string `protobuf:"bytes,4,opt,name=userID,proto3" json:"userID,omitempty"` + // Whether query statistics tracking should be enabled. The response will include + // statistics only when this option is enabled. + StatsEnabled bool `protobuf:"varint,5,opt,name=statsEnabled,proto3" json:"statsEnabled,omitempty"` } func (m *SchedulerToQuerier) Reset() { *m = SchedulerToQuerier{} } @@ -198,6 +201,13 @@ func (m *SchedulerToQuerier) GetUserID() string { return "" } +func (m *SchedulerToQuerier) GetStatsEnabled() bool { + if m != nil { + return m.StatsEnabled + } + return false +} + type FrontendToScheduler struct { Type FrontendToSchedulerType `protobuf:"varint,1,opt,name=type,proto3,enum=schedulerpb.FrontendToSchedulerType" json:"type,omitempty"` // Used by INIT message. Will be put into all requests passed to querier. @@ -206,8 +216,9 @@ type FrontendToScheduler struct { // Each frontend manages its own queryIDs. Different frontends may use same set of query IDs. QueryID uint64 `protobuf:"varint,3,opt,name=queryID,proto3" json:"queryID,omitempty"` // Following are used by ENQUEUE only. - UserID string `protobuf:"bytes,4,opt,name=userID,proto3" json:"userID,omitempty"` - HttpRequest *httpgrpc.HTTPRequest `protobuf:"bytes,5,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"` + UserID string `protobuf:"bytes,4,opt,name=userID,proto3" json:"userID,omitempty"` + HttpRequest *httpgrpc.HTTPRequest `protobuf:"bytes,5,opt,name=httpRequest,proto3" json:"httpRequest,omitempty"` + StatsEnabled bool `protobuf:"varint,6,opt,name=statsEnabled,proto3" json:"statsEnabled,omitempty"` } func (m *FrontendToScheduler) Reset() { *m = FrontendToScheduler{} } @@ -277,6 +288,13 @@ func (m *FrontendToScheduler) GetHttpRequest() *httpgrpc.HTTPRequest { return nil } +func (m *FrontendToScheduler) GetStatsEnabled() bool { + if m != nil { + return m.StatsEnabled + } + return false +} + type SchedulerToFrontend struct { Status SchedulerToFrontendStatus `protobuf:"varint,1,opt,name=status,proto3,enum=schedulerpb.SchedulerToFrontendStatus" json:"status,omitempty"` Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` @@ -340,43 +358,45 @@ func init() { func init() { proto.RegisterFile("scheduler.proto", fileDescriptor_2b3fc28395a6d9c5) } var fileDescriptor_2b3fc28395a6d9c5 = []byte{ - // 570 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x5d, 0x6f, 0x12, 0x41, - 0x14, 0xdd, 0xa1, 0x40, 0xe5, 0xa2, 0x76, 0x9d, 0x56, 0x45, 0xd2, 0x4c, 0x09, 0x31, 0x86, 0x34, - 0x11, 0x0c, 0x9a, 0xe8, 0x83, 0x31, 0xc1, 0x76, 0x6b, 0x89, 0x75, 0x29, 0xc3, 0x10, 0x3f, 0x5e, - 0x48, 0x81, 0x29, 0x34, 0x2d, 0xcc, 0x76, 0x76, 0xd7, 0x86, 0x37, 0x7f, 0x82, 0x3f, 0x43, 0xff, - 0x89, 0x8f, 0x3c, 0xf6, 0x51, 0x16, 0x1f, 0x7c, 0xec, 0x4f, 0x30, 0x1d, 0x96, 0x75, 0xa9, 0x90, - 0xfa, 0x76, 0xef, 0xdd, 0x73, 0x72, 0xce, 0x3d, 0x33, 0x3b, 0xb0, 0x62, 
0xb7, 0xba, 0xbc, 0xed, - 0x9e, 0x70, 0x99, 0xb7, 0xa4, 0x70, 0x04, 0x4e, 0x06, 0x03, 0xab, 0x99, 0x7e, 0xdc, 0x39, 0x72, - 0xba, 0x6e, 0x33, 0xdf, 0x12, 0xbd, 0x42, 0x47, 0x74, 0x44, 0x41, 0x61, 0x9a, 0xee, 0xa1, 0xea, - 0x54, 0xa3, 0xaa, 0x09, 0x37, 0xfd, 0x2c, 0x04, 0x3f, 0xe3, 0x07, 0x9f, 0xf9, 0x99, 0x90, 0xc7, - 0x76, 0xa1, 0x25, 0x7a, 0x3d, 0xd1, 0x2f, 0x74, 0x1d, 0xc7, 0xea, 0x48, 0xab, 0x15, 0x14, 0x13, - 0x56, 0xb6, 0x08, 0xb8, 0xea, 0x72, 0x79, 0xc4, 0x25, 0x13, 0xb5, 0xa9, 0x38, 0x5e, 0x87, 0xc4, - 0xe9, 0x64, 0x5a, 0xde, 0x4e, 0xa1, 0x0c, 0xca, 0x25, 0xe8, 0xdf, 0x41, 0xf6, 0x3b, 0x02, 0x1c, - 0x60, 0x99, 0xf0, 0xf9, 0x38, 0x05, 0xcb, 0x97, 0x98, 0x81, 0x4f, 0x89, 0xd2, 0x69, 0x8b, 0x9f, - 0x43, 0xf2, 0x52, 0x96, 0xf2, 0x53, 0x97, 0xdb, 0x4e, 0x2a, 0x92, 0x41, 0xb9, 0x64, 0xf1, 0x6e, - 0x3e, 0xb0, 0xb2, 0xcb, 0xd8, 0xbe, 0xff, 0x91, 0x86, 0x91, 0x38, 0x07, 0x2b, 0x87, 0x52, 0xf4, - 0x1d, 0xde, 0x6f, 0x97, 0xda, 0x6d, 0xc9, 0x6d, 0x3b, 0xb5, 0xa4, 0xdc, 0x5c, 0x1d, 0xe3, 0x7b, - 0x10, 0x77, 0x6d, 0x65, 0x37, 0xaa, 0x00, 0x7e, 0x97, 0xfd, 0x85, 0x60, 0x75, 0xc7, 0xc7, 0x86, - 0x37, 0x7c, 0x01, 0x51, 0x67, 0x60, 0x71, 0xe5, 0xf4, 0x76, 0xf1, 0x61, 0x3e, 0x14, 0x7c, 0x7e, - 0x0e, 0x9e, 0x0d, 0x2c, 0x4e, 0x15, 0x63, 0x9e, 0xa7, 0xc8, 0x7c, 0x4f, 0xa1, 0x40, 0x96, 0x66, - 0x03, 0x59, 0xe0, 0xf6, 0x6a, 0x50, 0xb1, 0xff, 0x0d, 0x2a, 0x7b, 0x0c, 0xab, 0xa1, 0x13, 0x99, - 0x2e, 0x80, 0x5f, 0x41, 0xdc, 0x76, 0x0e, 0x1c, 0xd7, 0xf6, 0xf7, 0x7c, 0x34, 0xb3, 0xe7, 0x1c, - 0x46, 0x4d, 0xa1, 0xa9, 0xcf, 0xc2, 0x6b, 0x10, 0xe3, 0x52, 0x0a, 0xe9, 0x6f, 0x38, 0x69, 0x36, - 0x5f, 0xc2, 0xfd, 0x05, 0x11, 0xe1, 0x1b, 0x10, 0x2d, 0x9b, 0x65, 0xa6, 0x6b, 0x38, 0x09, 0xcb, - 0x86, 0x59, 0xad, 0x1b, 0x75, 0x43, 0x47, 0x18, 0x20, 0xbe, 0x55, 0x32, 0xb7, 0x8c, 0x3d, 0x3d, - 0xb2, 0xd9, 0x82, 0x07, 0x0b, 0x85, 0x71, 0x1c, 0x22, 0x95, 0xb7, 0xba, 0x86, 0x33, 0xb0, 0xce, - 0x2a, 0x95, 0xc6, 0xbb, 0x92, 0xf9, 0xb1, 0x41, 0x8d, 0x6a, 0xdd, 0xa8, 0xb1, 0x5a, 0x63, 0xdf, - 0xa0, 0x0d, 0x66, 0x98, 0x25, 0x93, 0xe9, 0x08, 0x27, 0x20, 0x66, 0x50, 0x5a, 0xa1, 0x7a, 0x04, - 0xdf, 0x81, 0x5b, 0xb5, 0xdd, 0x3a, 0x63, 0x65, 0xf3, 0x4d, 0x63, 0xbb, 0xf2, 0xde, 0xd4, 0x97, - 0x8a, 0x27, 0xa1, 0x3c, 0x76, 0x84, 0x9c, 0x5e, 0xd1, 0x3a, 0x24, 0xfd, 0x72, 0x4f, 0x08, 0x0b, - 0x6f, 0xcc, 0xc4, 0xf1, 0xef, 0x7f, 0x90, 0xde, 0x58, 0x94, 0x97, 0x8f, 0xcd, 0x6a, 0x39, 0xf4, - 0x04, 0x15, 0x2d, 0x58, 0x0b, 0xab, 0x05, 0xf1, 0x7f, 0x80, 0x9b, 0xd3, 0x5a, 0xe9, 0x65, 0xae, - 0xbb, 0x66, 0xe9, 0xcc, 0x75, 0x07, 0x34, 0x51, 0x7c, 0x5d, 0x1a, 0x8e, 0x88, 0x76, 0x3e, 0x22, - 0xda, 0xc5, 0x88, 0xa0, 0x2f, 0x1e, 0x41, 0xdf, 0x3c, 0x82, 0x7e, 0x78, 0x04, 0x0d, 0x3d, 0x82, - 0x7e, 0x7a, 0x04, 0xfd, 0xf6, 0x88, 0x76, 0xe1, 0x11, 0xf4, 0x75, 0x4c, 0xb4, 0xe1, 0x98, 0x68, - 0xe7, 0x63, 0xa2, 0x7d, 0x0a, 0x3f, 0x2f, 0xcd, 0xb8, 0x7a, 0x00, 0x9e, 0xfe, 0x09, 0x00, 0x00, - 0xff, 0xff, 0x89, 0xbf, 0xda, 0x9a, 0x85, 0x04, 0x00, 0x00, + // 598 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x94, 0xcf, 0x4f, 0x13, 0x41, + 0x14, 0xc7, 0x77, 0x96, 0x76, 0x81, 0x57, 0x94, 0x75, 0x40, 0xad, 0x0d, 0x19, 0x36, 0x8d, 0x31, + 0x0d, 0x89, 0xad, 0xa9, 0x26, 0x7a, 0x30, 0x26, 0x15, 0x16, 0x69, 0xc4, 0x2d, 0x4c, 0xa7, 0xf1, + 0xc7, 0xa5, 0xa1, 0xed, 0x50, 0x08, 0xd0, 0x59, 0x66, 0x77, 0x25, 0xdc, 0x3c, 0x7a, 0xf4, 0xcf, + 0xf0, 0x4f, 0xf1, 0x62, 0xc2, 0x91, 0xa3, 0x6c, 0x2f, 0x1e, 0xf9, 0x13, 0x4c, 0xa7, 0xdb, 0xba, + 0xad, 0x6d, 0xf0, 0xf6, 0xde, 0xeb, 0xf7, 0xdb, 0xf7, 0xe6, 0xf3, 0x66, 0x07, 0x16, 
0xbd, 0xe6, + 0x01, 0x6f, 0x05, 0xc7, 0x5c, 0xe6, 0x5d, 0x29, 0x7c, 0x81, 0x53, 0xc3, 0x82, 0xdb, 0xc8, 0x3c, + 0x6e, 0x1f, 0xfa, 0x07, 0x41, 0x23, 0xdf, 0x14, 0x27, 0x85, 0xb6, 0x68, 0x8b, 0x82, 0xd2, 0x34, + 0x82, 0x7d, 0x95, 0xa9, 0x44, 0x45, 0x7d, 0x6f, 0xe6, 0x59, 0x4c, 0x7e, 0xc6, 0xf7, 0x3e, 0xf3, + 0x33, 0x21, 0x8f, 0xbc, 0x42, 0x53, 0x9c, 0x9c, 0x88, 0x4e, 0xe1, 0xc0, 0xf7, 0xdd, 0xb6, 0x74, + 0x9b, 0xc3, 0xa0, 0xef, 0xca, 0x16, 0x01, 0xef, 0x06, 0x5c, 0x1e, 0x72, 0xc9, 0x44, 0x75, 0xd0, + 0x1c, 0xaf, 0xc0, 0xfc, 0x69, 0xbf, 0x5a, 0xde, 0x48, 0x23, 0x0b, 0xe5, 0xe6, 0xe9, 0xdf, 0x42, + 0xf6, 0x27, 0x02, 0x3c, 0xd4, 0x32, 0x11, 0xf9, 0x71, 0x1a, 0x66, 0x7b, 0x9a, 0xf3, 0xc8, 0x92, + 0xa0, 0x83, 0x14, 0x3f, 0x87, 0x54, 0xaf, 0x2d, 0xe5, 0xa7, 0x01, 0xf7, 0xfc, 0xb4, 0x6e, 0xa1, + 0x5c, 0xaa, 0x78, 0x37, 0x3f, 0x1c, 0x65, 0x8b, 0xb1, 0x9d, 0xe8, 0x47, 0x1a, 0x57, 0xe2, 0x1c, + 0x2c, 0xee, 0x4b, 0xd1, 0xf1, 0x79, 0xa7, 0x55, 0x6a, 0xb5, 0x24, 0xf7, 0xbc, 0xf4, 0x8c, 0x9a, + 0x66, 0xbc, 0x8c, 0xef, 0x81, 0x11, 0x78, 0x6a, 0xdc, 0x84, 0x12, 0x44, 0x19, 0xce, 0xc2, 0x82, + 0xe7, 0xef, 0xf9, 0x9e, 0xdd, 0xd9, 0x6b, 0x1c, 0xf3, 0x56, 0x3a, 0x69, 0xa1, 0xdc, 0x1c, 0x1d, + 0xa9, 0x65, 0xbf, 0xea, 0xb0, 0xb4, 0x19, 0xfd, 0x5f, 0x9c, 0xc2, 0x0b, 0x48, 0xf8, 0xe7, 0x2e, + 0x57, 0xa7, 0xb9, 0x5d, 0x7c, 0x98, 0x8f, 0x2d, 0x27, 0x3f, 0x41, 0xcf, 0xce, 0x5d, 0x4e, 0x95, + 0x63, 0xd2, 0xdc, 0xfa, 0xe4, 0xb9, 0x63, 0xd0, 0x66, 0x46, 0xa1, 0x4d, 0x3b, 0xd1, 0x18, 0xcc, + 0xe4, 0x7f, 0xc3, 0x1c, 0x47, 0x61, 0x4c, 0x40, 0x71, 0x04, 0x4b, 0xb1, 0xcd, 0x0e, 0x0e, 0x89, + 0x5f, 0x81, 0xd1, 0x93, 0x05, 0x5e, 0xc4, 0xe2, 0xd1, 0x08, 0x8b, 0x09, 0x8e, 0xaa, 0x52, 0xd3, + 0xc8, 0x85, 0x97, 0x21, 0xc9, 0xa5, 0x14, 0x32, 0xa2, 0xd0, 0x4f, 0xd6, 0x5e, 0xc2, 0xfd, 0x29, + 0x18, 0xf1, 0x1c, 0x24, 0xca, 0x4e, 0x99, 0x99, 0x1a, 0x4e, 0xc1, 0xac, 0xed, 0xec, 0xd6, 0xec, + 0x9a, 0x6d, 0x22, 0x0c, 0x60, 0xac, 0x97, 0x9c, 0x75, 0x7b, 0xdb, 0xd4, 0xd7, 0x9a, 0xf0, 0x60, + 0x6a, 0x63, 0x6c, 0x80, 0x5e, 0x79, 0x6b, 0x6a, 0xd8, 0x82, 0x15, 0x56, 0xa9, 0xd4, 0xdf, 0x95, + 0x9c, 0x8f, 0x75, 0x6a, 0xef, 0xd6, 0xec, 0x2a, 0xab, 0xd6, 0x77, 0x6c, 0x5a, 0x67, 0xb6, 0x53, + 0x72, 0x98, 0x89, 0xf0, 0x3c, 0x24, 0x6d, 0x4a, 0x2b, 0xd4, 0xd4, 0xf1, 0x1d, 0xb8, 0x55, 0xdd, + 0xaa, 0x31, 0x56, 0x76, 0xde, 0xd4, 0x37, 0x2a, 0xef, 0x1d, 0x73, 0xa6, 0x78, 0x1c, 0xe3, 0xb1, + 0x29, 0xe4, 0xe0, 0xaa, 0xd7, 0x20, 0x15, 0x85, 0xdb, 0x42, 0xb8, 0x78, 0x75, 0x04, 0xc7, 0xbf, + 0xdf, 0x53, 0x66, 0x75, 0x1a, 0xaf, 0x48, 0x9b, 0xd5, 0x72, 0xe8, 0x09, 0x2a, 0xba, 0xb0, 0x1c, + 0xef, 0x36, 0xc4, 0xff, 0x01, 0x16, 0x06, 0xb1, 0xea, 0x67, 0xdd, 0x74, 0x15, 0x33, 0xd6, 0x4d, + 0x0b, 0xea, 0x77, 0x7c, 0x5d, 0xba, 0xb8, 0x22, 0xda, 0xe5, 0x15, 0xd1, 0xae, 0xaf, 0x08, 0xfa, + 0x12, 0x12, 0xf4, 0x3d, 0x24, 0xe8, 0x47, 0x48, 0xd0, 0x45, 0x48, 0xd0, 0xaf, 0x90, 0xa0, 0xdf, + 0x21, 0xd1, 0xae, 0x43, 0x82, 0xbe, 0x75, 0x89, 0x76, 0xd1, 0x25, 0xda, 0x65, 0x97, 0x68, 0x9f, + 0xe2, 0xcf, 0x54, 0xc3, 0x50, 0x0f, 0xc9, 0xd3, 0x3f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x85, 0xa8, + 0x0d, 0xe8, 0xcd, 0x04, 0x00, 0x00, } func (x FrontendToSchedulerType) String() string { @@ -448,6 +468,9 @@ func (this *SchedulerToQuerier) Equal(that interface{}) bool { if this.UserID != that1.UserID { return false } + if this.StatsEnabled != that1.StatsEnabled { + return false + } return true } func (this *FrontendToScheduler) Equal(that interface{}) bool { @@ -484,6 +507,9 @@ func (this *FrontendToScheduler) Equal(that interface{}) bool { if !this.HttpRequest.Equal(that1.HttpRequest) { return 
false } + if this.StatsEnabled != that1.StatsEnabled { + return false + } return true } func (this *SchedulerToFrontend) Equal(that interface{}) bool { @@ -527,7 +553,7 @@ func (this *SchedulerToQuerier) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 8) + s := make([]string, 0, 9) s = append(s, "&schedulerpb.SchedulerToQuerier{") s = append(s, "QueryID: "+fmt.Sprintf("%#v", this.QueryID)+",\n") if this.HttpRequest != nil { @@ -535,6 +561,7 @@ func (this *SchedulerToQuerier) GoString() string { } s = append(s, "FrontendAddress: "+fmt.Sprintf("%#v", this.FrontendAddress)+",\n") s = append(s, "UserID: "+fmt.Sprintf("%#v", this.UserID)+",\n") + s = append(s, "StatsEnabled: "+fmt.Sprintf("%#v", this.StatsEnabled)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -542,7 +569,7 @@ func (this *FrontendToScheduler) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 9) + s := make([]string, 0, 10) s = append(s, "&schedulerpb.FrontendToScheduler{") s = append(s, "Type: "+fmt.Sprintf("%#v", this.Type)+",\n") s = append(s, "FrontendAddress: "+fmt.Sprintf("%#v", this.FrontendAddress)+",\n") @@ -551,6 +578,7 @@ func (this *FrontendToScheduler) GoString() string { if this.HttpRequest != nil { s = append(s, "HttpRequest: "+fmt.Sprintf("%#v", this.HttpRequest)+",\n") } + s = append(s, "StatsEnabled: "+fmt.Sprintf("%#v", this.StatsEnabled)+",\n") s = append(s, "}") return strings.Join(s, "") } @@ -864,6 +892,16 @@ func (m *SchedulerToQuerier) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.StatsEnabled { + i-- + if m.StatsEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } if len(m.UserID) > 0 { i -= len(m.UserID) copy(dAtA[i:], m.UserID) @@ -918,6 +956,16 @@ func (m *FrontendToScheduler) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.StatsEnabled { + i-- + if m.StatsEnabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x30 + } if m.HttpRequest != nil { { size, err := m.HttpRequest.MarshalToSizedBuffer(dAtA[:i]) @@ -1037,6 +1085,9 @@ func (m *SchedulerToQuerier) Size() (n int) { if l > 0 { n += 1 + l + sovScheduler(uint64(l)) } + if m.StatsEnabled { + n += 2 + } return n } @@ -1064,6 +1115,9 @@ func (m *FrontendToScheduler) Size() (n int) { l = m.HttpRequest.Size() n += 1 + l + sovScheduler(uint64(l)) } + if m.StatsEnabled { + n += 2 + } return n } @@ -1108,6 +1162,7 @@ func (this *SchedulerToQuerier) String() string { `HttpRequest:` + strings.Replace(fmt.Sprintf("%v", this.HttpRequest), "HTTPRequest", "httpgrpc.HTTPRequest", 1) + `,`, `FrontendAddress:` + fmt.Sprintf("%v", this.FrontendAddress) + `,`, `UserID:` + fmt.Sprintf("%v", this.UserID) + `,`, + `StatsEnabled:` + fmt.Sprintf("%v", this.StatsEnabled) + `,`, `}`, }, "") return s @@ -1122,6 +1177,7 @@ func (this *FrontendToScheduler) String() string { `QueryID:` + fmt.Sprintf("%v", this.QueryID) + `,`, `UserID:` + fmt.Sprintf("%v", this.UserID) + `,`, `HttpRequest:` + strings.Replace(fmt.Sprintf("%v", this.HttpRequest), "HTTPRequest", "httpgrpc.HTTPRequest", 1) + `,`, + `StatsEnabled:` + fmt.Sprintf("%v", this.StatsEnabled) + `,`, `}`, }, "") return s @@ -1378,6 +1434,26 @@ func (m *SchedulerToQuerier) Unmarshal(dAtA []byte) error { } m.UserID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field StatsEnabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowScheduler
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.StatsEnabled = bool(v != 0)
 		default:
 			iNdEx = preIndex
 			skippy, err := skipScheduler(dAtA[iNdEx:])
@@ -1569,6 +1645,26 @@ func (m *FrontendToScheduler) Unmarshal(dAtA []byte) error {
 				return err
 			}
 			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StatsEnabled", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowScheduler
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.StatsEnabled = bool(v != 0)
 		default:
 			iNdEx = preIndex
 			skippy, err := skipScheduler(dAtA[iNdEx:])
diff --git a/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.proto b/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.proto
index 62fab0d408f..c641fb8cbc1 100644
--- a/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.proto
+++ b/vendor/github.com/cortexproject/cortex/pkg/scheduler/schedulerpb/scheduler.proto
@@ -38,6 +38,10 @@ message SchedulerToQuerier {
 
   // User who initiated the request. Needed to send reply back to frontend.
   string userID = 4;
+
+  // Whether query statistics tracking should be enabled. The response will include
+  // statistics only when this option is enabled.
+  bool statsEnabled = 5;
 }
 
 // Scheduler interface exposed to Frontend. Frontend can enqueue and cancel requests.
@@ -70,6 +74,7 @@ message FrontendToScheduler {
   // Following are used by ENQUEUE only.
   string userID = 4;
   httpgrpc.HTTPRequest httpRequest = 5;
+  bool statsEnabled = 6;
 }
 
 enum SchedulerToFrontendStatus {
diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/bucket_util.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/bucket_util.go
new file mode 100644
index 00000000000..50c3bd11601
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/bucket_util.go
@@ -0,0 +1,33 @@
+package bucket
+
+import (
+	"context"
+	"strings"
+
+	"github.com/go-kit/kit/log"
+	"github.com/go-kit/kit/log/level"
+	"github.com/thanos-io/thanos/pkg/objstore"
+)
+
+// DeletePrefix removes all objects with the given prefix, recursively.
+// It returns the number of deleted objects.
+// If deletion of any object fails, it returns the error and stops.
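Ahead of the DeletePrefix body that follows, a usage sketch. It assumes the vendored go-kit and Thanos objstore packages shown in the new file's imports, and uses Thanos's in-memory bucket so the example is self-contained:

package main

import (
	"context"
	"fmt"
	"strings"

	"github.com/cortexproject/cortex/pkg/storage/bucket"
	"github.com/go-kit/kit/log"
	"github.com/thanos-io/thanos/pkg/objstore"
)

func main() {
	ctx := context.Background()
	bkt := objstore.NewInMemBucket()

	// Seed a nested layout; DeletePrefix recurses through "directories"
	// (objstore exposes a delimiter-based pseudo-hierarchy via Iter).
	for _, name := range []string{"tenant/block-1/meta.json", "tenant/block-1/index", "other/keep.json"} {
		_ = bkt.Upload(ctx, name, strings.NewReader("{}"))
	}

	deleted, err := bucket.DeletePrefix(ctx, bkt, "tenant/", log.NewNopLogger())
	fmt.Println(deleted, err) // 2 <nil>: everything under tenant/ is gone, other/ is kept
}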
+func DeletePrefix(ctx context.Context, bkt objstore.Bucket, prefix string, logger log.Logger) (int, error) { + result := 0 + err := bkt.Iter(ctx, prefix, func(name string) error { + if strings.HasSuffix(name, objstore.DirDelim) { + deleted, err := DeletePrefix(ctx, bkt, name, logger) + result += deleted + return err + } + + if err := bkt.Delete(ctx, name); err != nil { + return err + } + result++ + level.Debug(logger).Log("msg", "deleted file", "file", name) + return nil + }) + + return result, err +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client_mock.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client_mock.go index c09fe92f77e..cacac031c0f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client_mock.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/client_mock.go @@ -6,6 +6,7 @@ import ( "errors" "io" "io/ioutil" + "time" "github.com/stretchr/testify/mock" "github.com/thanos-io/thanos/pkg/objstore" @@ -24,6 +25,10 @@ func (m *ClientMock) Upload(ctx context.Context, name string, r io.Reader) error return args.Error(0) } +func (m *ClientMock) MockUpload(name string, err error) { + m.On("Upload", mock.Anything, name, mock.Anything).Return(err) +} + // Delete mocks objstore.Bucket.Delete() func (m *ClientMock) Delete(ctx context.Context, name string) error { args := m.Called(ctx, name) @@ -78,6 +83,10 @@ func (m *ClientMock) Get(ctx context.Context, name string) (io.ReadCloser, error func (m *ClientMock) MockGet(name, content string, err error) { if content != "" { m.On("Exists", mock.Anything, name).Return(true, err) + m.On("Attributes", mock.Anything, name).Return(objstore.ObjectAttributes{ + Size: int64(len(content)), + LastModified: time.Now(), + }, nil) // Since we return an ReadCloser and it can be consumed only once, // each time the mocked Get() is called we do create a new one, so @@ -89,6 +98,7 @@ func (m *ClientMock) MockGet(name, content string, err error) { } else { m.On("Exists", mock.Anything, name).Return(false, err) m.On("Get", mock.Anything, name).Return(nil, errObjectDoesNotExist) + m.On("Attributes", mock.Anything, name).Return(nil, errObjectDoesNotExist) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go index 51dab86036f..2f17bbe737e 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/bucket_client.go @@ -28,6 +28,11 @@ func newS3Config(cfg Config) s3.Config { IdleConnTimeout: model.Duration(cfg.HTTP.IdleConnTimeout), ResponseHeaderTimeout: model.Duration(cfg.HTTP.ResponseHeaderTimeout), InsecureSkipVerify: cfg.HTTP.InsecureSkipVerify, + TLSHandshakeTimeout: model.Duration(cfg.HTTP.TLSHandshakeTimeout), + ExpectContinueTimeout: model.Duration(cfg.HTTP.ExpectContinueTimeout), + MaxIdleConns: cfg.HTTP.MaxIdleConns, + MaxIdleConnsPerHost: cfg.HTTP.MaxIdleConnsPerHost, + MaxConnsPerHost: cfg.HTTP.MaxConnsPerHost, Transport: cfg.HTTP.Transport, }, // Enforce signature version 2 if CLI flag is set diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go index 96db7e1f0c1..17d2f77f7da 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/s3/config.go @@ -27,6 +27,11 @@ type 
HTTPConfig struct { IdleConnTimeout time.Duration `yaml:"idle_conn_timeout"` ResponseHeaderTimeout time.Duration `yaml:"response_header_timeout"` InsecureSkipVerify bool `yaml:"insecure_skip_verify"` + TLSHandshakeTimeout time.Duration `yaml:"tls_handshake_timeout"` + ExpectContinueTimeout time.Duration `yaml:"expect_continue_timeout"` + MaxIdleConns int `yaml:"max_idle_connections"` + MaxIdleConnsPerHost int `yaml:"max_idle_connections_per_host"` + MaxConnsPerHost int `yaml:"max_connections_per_host"` // Allow upstream callers to inject a round tripper Transport http.RoundTripper `yaml:"-"` @@ -37,6 +42,11 @@ func (cfg *HTTPConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.DurationVar(&cfg.IdleConnTimeout, prefix+"s3.http.idle-conn-timeout", 90*time.Second, "The time an idle connection will remain idle before closing.") f.DurationVar(&cfg.ResponseHeaderTimeout, prefix+"s3.http.response-header-timeout", 2*time.Minute, "The amount of time the client will wait for a servers response headers.") f.BoolVar(&cfg.InsecureSkipVerify, prefix+"s3.http.insecure-skip-verify", false, "If the client connects to S3 via HTTPS and this option is enabled, the client will accept any certificate and hostname.") + f.DurationVar(&cfg.TLSHandshakeTimeout, prefix+"s3.tls-handshake-timeout", 10*time.Second, "Maximum time to wait for a TLS handshake. 0 means no limit.") + f.DurationVar(&cfg.ExpectContinueTimeout, prefix+"s3.expect-continue-timeout", 1*time.Second, "The time to wait for a server's first response headers after fully writing the request headers if the request has an Expect header. 0 to send the request body immediately.") + f.IntVar(&cfg.MaxIdleConns, prefix+"s3.max-idle-connections", 100, "Maximum number of idle (keep-alive) connections across all hosts. 0 means no limit.") + f.IntVar(&cfg.MaxIdleConnsPerHost, prefix+"s3.max-idle-connections-per-host", 100, "Maximum number of idle (keep-alive) connections to keep per-host. If 0, a built-in default value is used.") + f.IntVar(&cfg.MaxConnsPerHost, prefix+"s3.max-connections-per-host", 0, "Maximum number of connections per host. 0 means no limit.") } // Config holds the config options for an S3 backend diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/bucket_client.go index 179647dd4fd..bc4dcd28050 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/bucket_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/bucket_client.go @@ -2,6 +2,7 @@ package swift import ( "github.com/go-kit/kit/log" + "github.com/prometheus/common/model" "github.com/thanos-io/thanos/pkg/objstore" "github.com/thanos-io/thanos/pkg/objstore/swift" yaml "gopkg.in/yaml.v2" @@ -9,7 +10,8 @@ import ( // NewBucketClient creates a new Swift bucket client func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucket, error) { - bucketConfig := swift.SwiftConfig{ + bucketConfig := swift.Config{ + AuthVersion: cfg.AuthVersion, AuthUrl: cfg.AuthURL, Username: cfg.Username, UserDomainName: cfg.UserDomainName, @@ -24,6 +26,13 @@ func NewBucketClient(cfg Config, name string, logger log.Logger) (objstore.Bucke ProjectDomainName: cfg.ProjectDomainName, RegionName: cfg.RegionName, ContainerName: cfg.ContainerName, + Retries: cfg.MaxRetries, + ConnectTimeout: model.Duration(cfg.ConnectTimeout), + Timeout: model.Duration(cfg.RequestTimeout), + + // Hard-coded defaults. 
+ ChunkSize: swift.DefaultConfig.ChunkSize, + UseDynamicLargeObjects: false, } // Thanos currently doesn't support passing the config as is, but expects a YAML, diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/config.go index 3bc682af7ed..783621f8874 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/bucket/swift/config.go @@ -2,24 +2,29 @@ package swift import ( "flag" + "time" ) // Config holds the config options for Swift backend type Config struct { - AuthURL string `yaml:"auth_url"` - Username string `yaml:"username"` - UserDomainName string `yaml:"user_domain_name"` - UserDomainID string `yaml:"user_domain_id"` - UserID string `yaml:"user_id"` - Password string `yaml:"password"` - DomainID string `yaml:"domain_id"` - DomainName string `yaml:"domain_name"` - ProjectID string `yaml:"project_id"` - ProjectName string `yaml:"project_name"` - ProjectDomainID string `yaml:"project_domain_id"` - ProjectDomainName string `yaml:"project_domain_name"` - RegionName string `yaml:"region_name"` - ContainerName string `yaml:"container_name"` + AuthVersion int `yaml:"auth_version"` + AuthURL string `yaml:"auth_url"` + Username string `yaml:"username"` + UserDomainName string `yaml:"user_domain_name"` + UserDomainID string `yaml:"user_domain_id"` + UserID string `yaml:"user_id"` + Password string `yaml:"password"` + DomainID string `yaml:"domain_id"` + DomainName string `yaml:"domain_name"` + ProjectID string `yaml:"project_id"` + ProjectName string `yaml:"project_name"` + ProjectDomainID string `yaml:"project_domain_id"` + ProjectDomainName string `yaml:"project_domain_name"` + RegionName string `yaml:"region_name"` + ContainerName string `yaml:"container_name"` + MaxRetries int `yaml:"max_retries"` + ConnectTimeout time.Duration `yaml:"connect_timeout"` + RequestTimeout time.Duration `yaml:"request_timeout"` } // RegisterFlags registers the flags for Swift storage @@ -29,6 +34,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { // RegisterFlagsWithPrefix registers the flags for Swift storage with the provided prefix func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.IntVar(&cfg.AuthVersion, prefix+"swift.auth-version", 0, "OpenStack Swift authentication API version. 
0 to autodetect.") f.StringVar(&cfg.AuthURL, prefix+"swift.auth-url", "", "OpenStack Swift authentication URL") f.StringVar(&cfg.Username, prefix+"swift.username", "", "OpenStack Swift username.") f.StringVar(&cfg.UserDomainName, prefix+"swift.user-domain-name", "", "OpenStack Swift user's domain name.") @@ -43,4 +49,7 @@ func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.StringVar(&cfg.ProjectDomainName, prefix+"swift.project-domain-name", "", "Name of the OpenStack Swift project's domain (v3 auth only), only needed if it differs from the user domain.") f.StringVar(&cfg.RegionName, prefix+"swift.region-name", "", "OpenStack Swift Region to use (v2,v3 auth only).") f.StringVar(&cfg.ContainerName, prefix+"swift.container-name", "", "Name of the OpenStack Swift container to put chunks in.") + f.IntVar(&cfg.MaxRetries, prefix+"swift.max-retries", 3, "Max retries on requests error.") + f.DurationVar(&cfg.ConnectTimeout, prefix+"swift.connect-timeout", 10*time.Second, "Time after which a connection attempt is aborted.") + f.DurationVar(&cfg.RequestTimeout, prefix+"swift.request-timeout", 5*time.Second, "Time after which an idle request is aborted. The timeout watchdog is reset each time some data is received, so the timeout triggers after X time no data is received on a request.") } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/index.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/index.go index a6600389bef..5c5f6cb5d4b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/index.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/index.go @@ -33,16 +33,37 @@ type Index struct { Version int `json:"version"` // List of complete blocks (partial blocks are excluded from the index). - Blocks []*Block `json:"blocks"` + Blocks Blocks `json:"blocks"` // List of block deletion marks. - BlockDeletionMarks []*BlockDeletionMark `json:"block_deletion_marks"` + BlockDeletionMarks BlockDeletionMarks `json:"block_deletion_marks"` // UpdatedAt is a unix timestamp (seconds precision) of when the index has been updated // (written in the storage) the last time. UpdatedAt int64 `json:"updated_at"` } +func (idx *Index) GetUpdatedAt() time.Time { + return time.Unix(idx.UpdatedAt, 0) +} + +// RemoveBlock removes block and its deletion mark (if any) from index. +func (idx *Index) RemoveBlock(id ulid.ULID) { + for i := 0; i < len(idx.Blocks); i++ { + if idx.Blocks[i].ID == id { + idx.Blocks = append(idx.Blocks[:i], idx.Blocks[i+1:]...) + break + } + } + + for i := 0; i < len(idx.BlockDeletionMarks); i++ { + if idx.BlockDeletionMarks[i].ID == id { + idx.BlockDeletionMarks = append(idx.BlockDeletionMarks[:i], idx.BlockDeletionMarks[i+1:]...) + break + } + } +} + // Block holds the information about a block in the index. type Block struct { // Block ID. @@ -64,6 +85,13 @@ type Block struct { UploadedAt int64 `json:"uploaded_at"` } +// Within returns whether the block contains samples within the provided range. +// Input minT and maxT are both inclusive. +func (m *Block) Within(minT, maxT int64) bool { + // NOTE: Block intervals are half-open: [MinTime, MaxTime). + return m.MinTime <= maxT && minT < m.MaxTime +} + func (m *Block) GetUploadedAt() time.Time { return time.Unix(m.UploadedAt, 0) } @@ -71,8 +99,8 @@ func (m *Block) GetUploadedAt() time.Time { // ThanosMeta returns a block meta based on the known information in the index. 
// The returned meta doesn't include all original meta.json data but only a subset // of it. -func (m *Block) ThanosMeta(userID string) metadata.Meta { - return metadata.Meta{ +func (m *Block) ThanosMeta(userID string) *metadata.Meta { + return &metadata.Meta{ BlockMeta: tsdb.BlockMeta{ ULID: m.ID, MinTime: m.MinTime, @@ -167,6 +195,19 @@ type BlockDeletionMark struct { DeletionTime int64 `json:"deletion_time"` } +func (m *BlockDeletionMark) GetDeletionTime() time.Time { + return time.Unix(m.DeletionTime, 0) +} + +// ThanosMeta returns the Thanos deletion mark. +func (m *BlockDeletionMark) ThanosDeletionMark() *metadata.DeletionMark { + return &metadata.DeletionMark{ + ID: m.ID, + Version: metadata.DeletionMarkVersion1, + DeletionTime: m.DeletionTime, + } +} + func BlockDeletionMarkFromThanosMarker(mark *metadata.DeletionMark) *BlockDeletionMark { return &BlockDeletionMark{ ID: mark.ID, @@ -174,6 +215,26 @@ func BlockDeletionMarkFromThanosMarker(mark *metadata.DeletionMark) *BlockDeleti } } +// BlockDeletionMarks holds a set of block deletion marks in the index. No ordering guaranteed. +type BlockDeletionMarks []*BlockDeletionMark + +func (s BlockDeletionMarks) GetULIDs() []ulid.ULID { + ids := make([]ulid.ULID, len(s)) + for i, m := range s { + ids[i] = m.ID + } + return ids +} + +func (s BlockDeletionMarks) Clone() BlockDeletionMarks { + clone := make(BlockDeletionMarks, len(s)) + for i, m := range s { + v := *m + clone[i] = &v + } + return clone +} + // Blocks holds a set of blocks in the index. No ordering guaranteed. type Blocks []*Block diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/loader.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/loader.go new file mode 100644 index 00000000000..127171c72cc --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/loader.go @@ -0,0 +1,275 @@ +package bucketindex + +import ( + "context" + "sync" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/thanos-io/thanos/pkg/objstore" + "go.uber.org/atomic" + + "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/services" +) + +const ( + // readIndexTimeout is the maximum allowed time when reading a single bucket index + // from the storage. It's hard-coded to a reasonably high value. + readIndexTimeout = 15 * time.Second +) + +type LoaderConfig struct { + CheckInterval time.Duration + UpdateOnStaleInterval time.Duration + UpdateOnErrorInterval time.Duration + IdleTimeout time.Duration +} + +// Loader is responsible for lazily loading bucket indexes and, once loaded for the first time, +// keeping them updated in the background. Loaded indexes are automatically offloaded once the +// idle timeout expires. +type Loader struct { + services.Service + + bkt objstore.Bucket + logger log.Logger + cfg LoaderConfig + + indexesMx sync.RWMutex + indexes map[string]*cachedIndex + + // Metrics. + loadAttempts prometheus.Counter + loadFailures prometheus.Counter + loadDuration prometheus.Histogram + loaded prometheus.GaugeFunc +} + +// NewLoader makes a new Loader. 
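(An illustrative lifecycle sketch, not part of the patch. It assumes the StartAndAwaitRunning/StopAndAwaitTerminated helpers from pkg/util/services plus the imports already shown in this file; the interval values are arbitrary.)

func exampleLoader(ctx context.Context, bkt objstore.Bucket) (*bucketindex.Index, error) {
	cfg := bucketindex.LoaderConfig{
		CheckInterval:         time.Minute,
		UpdateOnStaleInterval: 15 * time.Minute,
		UpdateOnErrorInterval: time.Minute,
		IdleTimeout:           time.Hour,
	}

	loader := bucketindex.NewLoader(cfg, bkt, log.NewNopLogger(), prometheus.NewRegistry())
	if err := services.StartAndAwaitRunning(ctx, loader); err != nil {
		return nil, err
	}
	defer services.StopAndAwaitTerminated(context.Background(), loader) //nolint:errcheck

	// The first call loads the index from the bucket; subsequent calls are served
	// from the in-memory cache until the background job refreshes or offloads it.
	return loader.GetIndex(ctx, "tenant-1")
}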
+func NewLoader(cfg LoaderConfig, bucketClient objstore.Bucket, logger log.Logger, reg prometheus.Registerer) *Loader { + l := &Loader{ + bkt: bucketClient, + logger: logger, + cfg: cfg, + indexes: map[string]*cachedIndex{}, + + loadAttempts: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "cortex_bucket_index_loads_total", + Help: "Total number of bucket index loading attempts.", + }), + loadFailures: promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Name: "cortex_bucket_index_load_failures_total", + Help: "Total number of bucket index loading failures.", + }), + loadDuration: promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + Name: "cortex_bucket_index_load_duration_seconds", + Help: "Duration of a single bucket index loading operation in seconds.", + Buckets: []float64{0.01, 0.02, 0.05, 0.1, 0.2, 0.3, 1, 10}, + }), + } + + l.loaded = promauto.With(reg).NewGaugeFunc(prometheus.GaugeOpts{ + Name: "cortex_bucket_index_loaded", + Help: "Number of bucket indexes currently loaded in-memory.", + }, l.countLoadedIndexesMetric) + + // Apply a jitter to the sync frequency in order to increase the probability + // of hitting the shared cache (if any). + checkInterval := util.DurationWithJitter(cfg.CheckInterval, 0.2) + l.Service = services.NewTimerService(checkInterval, nil, l.checkCachedIndexes, nil) + + return l +} + +// GetIndex returns the bucket index for the given user. It returns the in-memory cached +// index if available, or loads it from the bucket otherwise. +func (l *Loader) GetIndex(ctx context.Context, userID string) (*Index, error) { + l.indexesMx.RLock() + if entry := l.indexes[userID]; entry != nil { + idx := entry.index + err := entry.err + l.indexesMx.RUnlock() + + // We don't check if the index is stale because it's the responsibility + // of the background job to keep it updated. + entry.requestedAt.Store(time.Now().Unix()) + return idx, err + } + l.indexesMx.RUnlock() + + startTime := time.Now() + l.loadAttempts.Inc() + idx, err := ReadIndex(ctx, l.bkt, userID, l.logger) + if err != nil { + // Cache the error, to avoid hammering the object store in case of persistent issues + // (eg. a corrupted or non-existing bucket index). + l.cacheIndex(userID, nil, err) + + if errors.Is(err, ErrIndexNotFound) { + level.Warn(l.logger).Log("msg", "bucket index not found", "user", userID) + } else { + // We don't track ErrIndexNotFound as a failure because it's a legit case (eg. a tenant just + // started to remote write and its blocks haven't been uploaded to storage yet). + l.loadFailures.Inc() + level.Error(l.logger).Log("msg", "unable to load bucket index", "user", userID, "err", err) + } + + return nil, err + } + + // Cache the index. + l.cacheIndex(userID, idx, nil) + + elapsedTime := time.Since(startTime) + l.loadDuration.Observe(elapsedTime.Seconds()) + level.Info(l.logger).Log("msg", "loaded bucket index", "user", userID, "duration", elapsedTime) + return idx, nil +} + +func (l *Loader) cacheIndex(userID string, idx *Index, err error) { + l.indexesMx.Lock() + defer l.indexesMx.Unlock() + + // Not an issue if, due to concurrency, another index was already cached + // and we overwrite it: last will win. + l.indexes[userID] = newCachedIndex(idx, err) +} + +// checkCachedIndexes checks all cached indexes and, for each of them, does two things: +// 1. Offload indexes that haven't been requested for at least the idle timeout +// 2. 
Update indexes that haven't been updated for at least the update timeout +func (l *Loader) checkCachedIndexes(ctx context.Context) error { + // Build a list of users for which we should update or delete the index. + toUpdate, toDelete := l.checkCachedIndexesToUpdateAndDelete() + + // Delete unused indexes. + for _, userID := range toDelete { + l.deleteCachedIndex(userID) + } + + // Update actively used indexes. + for _, userID := range toUpdate { + l.updateCachedIndex(ctx, userID) + } + + // Never return an error, otherwise the service terminates. + return nil +} + +func (l *Loader) checkCachedIndexesToUpdateAndDelete() (toUpdate, toDelete []string) { + now := time.Now() + + l.indexesMx.RLock() + defer l.indexesMx.RUnlock() + + for userID, entry := range l.indexes { + // Given ErrIndexNotFound is a legit case and assuming UpdateOnErrorInterval is lower than + // UpdateOnStaleInterval, we don't consider ErrIndexNotFound as an error with regard to the + // refresh interval, and so it will be updated once stale. + isError := entry.err != nil && !errors.Is(entry.err, ErrIndexNotFound) + + switch { + case now.Sub(entry.getRequestedAt()) >= l.cfg.IdleTimeout: + toDelete = append(toDelete, userID) + case isError && now.Sub(entry.getUpdatedAt()) >= l.cfg.UpdateOnErrorInterval: + toUpdate = append(toUpdate, userID) + case !isError && now.Sub(entry.getUpdatedAt()) >= l.cfg.UpdateOnStaleInterval: + toUpdate = append(toUpdate, userID) + } + } + + return +} + +func (l *Loader) updateCachedIndex(ctx context.Context, userID string) { + readCtx, cancel := context.WithTimeout(ctx, readIndexTimeout) + defer cancel() + + l.loadAttempts.Inc() + startTime := time.Now() + idx, err := ReadIndex(readCtx, l.bkt, userID, l.logger) + if err != nil && !errors.Is(err, ErrIndexNotFound) { + l.loadFailures.Inc() + level.Warn(l.logger).Log("msg", "unable to update bucket index", "user", userID, "err", err) + return + } + + l.loadDuration.Observe(time.Since(startTime).Seconds()) + + // We cache it whether it was successfully refreshed or wasn't found. A use case for caching ErrIndexNotFound + // is when a tenant has rules configured but hasn't started remote writing yet. Rules will be evaluated and + // bucket index loaded by the ruler. + l.indexesMx.Lock() + l.indexes[userID].index = idx + l.indexes[userID].err = err + l.indexes[userID].setUpdatedAt(startTime) + l.indexesMx.Unlock() +} + +func (l *Loader) deleteCachedIndex(userID string) { + l.indexesMx.Lock() + delete(l.indexes, userID) + l.indexesMx.Unlock() + + level.Info(l.logger).Log("msg", "unloaded bucket index", "user", userID, "reason", "idle") +} + +func (l *Loader) countLoadedIndexesMetric() float64 { + l.indexesMx.RLock() + defer l.indexesMx.RUnlock() + + count := 0 + for _, idx := range l.indexes { + if idx.index != nil { + count++ + } + } + return float64(count) +} + +type cachedIndex struct { + // We cache either the index or the error that occurred while fetching it. They're + // mutually exclusive. + index *Index + err error + + // Unix timestamp (seconds) of when the index has been updated from the storage the last time. + updatedAt atomic.Int64 + + // Unix timestamp (seconds) of when the index has been requested the last time. 
requestedAt atomic.Int64 +} + +func newCachedIndex(idx *Index, err error) *cachedIndex { + entry := &cachedIndex{ + index: idx, + err: err, + } + + now := time.Now() + entry.setUpdatedAt(now) + entry.setRequestedAt(now) + + return entry +} + +func (i *cachedIndex) setUpdatedAt(ts time.Time) { + i.updatedAt.Store(ts.Unix()) +} + +func (i *cachedIndex) getUpdatedAt() time.Time { + return time.Unix(i.updatedAt.Load(), 0) +} + +func (i *cachedIndex) setRequestedAt(ts time.Time) { + i.requestedAt.Store(ts.Unix()) +} + +func (i *cachedIndex) getRequestedAt() time.Time { + return time.Unix(i.requestedAt.Load(), 0) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers.go index a477a74250f..b90e36d6d5b 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers.go @@ -1,12 +1,20 @@ package bucketindex import ( + "context" "fmt" + "path" "path/filepath" "strings" "github.com/oklog/ulid" + "github.com/pkg/errors" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/objstore" + + "github.com/cortexproject/cortex/pkg/storage/bucket" ) const ( @@ -36,3 +44,46 @@ func IsBlockDeletionMarkFilename(name string) (ulid.ULID, bool) { id, err := ulid.Parse(filepath.Base(parts[0])) return id, err == nil } + +// MigrateBlockDeletionMarksToGlobalLocation lists all the tenant's blocks and, for each of them, looks for +// a deletion mark in the block location. Found deletion marks are copied to the global markers location. +// The migration continues on error and returns once all blocks have been checked. +func MigrateBlockDeletionMarksToGlobalLocation(ctx context.Context, bkt objstore.Bucket, userID string) error { + userBucket := bucket.NewUserBucketClient(userID, bkt) + + // Find all blocks in the storage. + var blocks []ulid.ULID + err := userBucket.Iter(ctx, "", func(name string) error { + if id, ok := block.IsBlockDir(name); ok { + blocks = append(blocks, id) + } + return nil + }) + if err != nil { + return errors.Wrap(err, "list blocks") + } + + errs := tsdb_errors.NewMulti() + + for _, blockID := range blocks { + // Look up the deletion mark (if any). + reader, err := userBucket.Get(ctx, path.Join(blockID.String(), metadata.DeletionMarkFilename)) + if userBucket.IsObjNotFoundErr(err) { + continue + } else if err != nil { + errs.Add(err) + continue + } + + // Upload it to the global markers location. 
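+ // Note: the mark is copied, not moved; the original per-block deletion mark stays in place.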
+ uploadErr := userBucket.Upload(ctx, BlockDeletionMarkFilepath(blockID), reader) + if closeErr := reader.Close(); closeErr != nil { + errs.Add(closeErr) + } + if uploadErr != nil { + errs.Add(uploadErr) + } + } + + return errs.Err() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers_bucket_client.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers_bucket_client.go index 1dcfdea9813..f4eb5f85978 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers_bucket_client.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/markers_bucket_client.go @@ -110,6 +110,24 @@ func (b *globalMarkersBucket) Attributes(ctx context.Context, name string) (objs return b.parent.Attributes(ctx, name) } +// WithExpectedErrs implements objstore.InstrumentedBucket. +func (b *globalMarkersBucket) WithExpectedErrs(fn objstore.IsOpFailureExpectedFunc) objstore.Bucket { + if ib, ok := b.parent.(objstore.InstrumentedBucket); ok { + return ib.WithExpectedErrs(fn) + } + + return b +} + +// ReaderWithExpectedErrs implements objstore.InstrumentedBucketReader. +func (b *globalMarkersBucket) ReaderWithExpectedErrs(fn objstore.IsOpFailureExpectedFunc) objstore.BucketReader { + if ib, ok := b.parent.(objstore.InstrumentedBucketReader); ok { + return ib.ReaderWithExpectedErrs(fn) + } + + return b +} + func (b *globalMarkersBucket) isBlockDeletionMark(name string) (ulid.ULID, bool) { if path.Base(name) != metadata.DeletionMarkFilename { return ulid.ULID{}, false diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/reader.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/reader.go deleted file mode 100644 index 3ad3979847a..00000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/reader.go +++ /dev/null @@ -1,50 +0,0 @@ -package bucketindex - -import ( - "compress/gzip" - "context" - "encoding/json" - - "github.com/go-kit/kit/log" - "github.com/pkg/errors" - "github.com/thanos-io/thanos/pkg/objstore" - "github.com/thanos-io/thanos/pkg/runutil" - - "github.com/cortexproject/cortex/pkg/storage/bucket" -) - -var ( - ErrIndexNotFound = errors.New("bucket index not found") - ErrIndexCorrupted = errors.New("bucket index corrupted") -) - -// ReadIndex reads, parses and returns a bucket index from the bucket. -func ReadIndex(ctx context.Context, bkt objstore.Bucket, userID string, logger log.Logger) (*Index, error) { - bkt = bucket.NewUserBucketClient(userID, bkt) - - // Get the bucket index. - reader, err := bkt.Get(ctx, IndexCompressedFilename) - if err != nil { - if bkt.IsObjNotFoundErr(err) { - return nil, ErrIndexNotFound - } - return nil, errors.Wrap(err, "read bucket index") - } - defer runutil.CloseWithLogOnErr(logger, reader, "close bucket index reader") - - // Read all the content. - gzipReader, err := gzip.NewReader(reader) - if err != nil { - return nil, ErrIndexCorrupted - } - defer runutil.CloseWithLogOnErr(logger, gzipReader, "close bucket index gzip reader") - - // Deserialize it. 
- index := &Index{} - d := json.NewDecoder(gzipReader) - if err := d.Decode(index); err != nil { - return nil, ErrIndexCorrupted - } - - return index, nil -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/storage.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/storage.go new file mode 100644 index 00000000000..97953c960ad --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/storage.go @@ -0,0 +1,92 @@ +package bucketindex + +import ( + "bytes" + "compress/gzip" + "context" + "encoding/json" + + "github.com/go-kit/kit/log" + "github.com/pkg/errors" + "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/thanos/pkg/runutil" + + "github.com/cortexproject/cortex/pkg/storage/bucket" +) + +var ( + ErrIndexNotFound = errors.New("bucket index not found") + ErrIndexCorrupted = errors.New("bucket index corrupted") +) + +// ReadIndex reads, parses and returns a bucket index from the bucket. +func ReadIndex(ctx context.Context, bkt objstore.Bucket, userID string, logger log.Logger) (*Index, error) { + userBkt := bucket.NewUserBucketClient(userID, bkt) + + // Get the bucket index. + reader, err := userBkt.WithExpectedErrs(userBkt.IsObjNotFoundErr).Get(ctx, IndexCompressedFilename) + if err != nil { + if userBkt.IsObjNotFoundErr(err) { + return nil, ErrIndexNotFound + } + return nil, errors.Wrap(err, "read bucket index") + } + defer runutil.CloseWithLogOnErr(logger, reader, "close bucket index reader") + + // Read all the content. + gzipReader, err := gzip.NewReader(reader) + if err != nil { + return nil, ErrIndexCorrupted + } + defer runutil.CloseWithLogOnErr(logger, gzipReader, "close bucket index gzip reader") + + // Deserialize it. + index := &Index{} + d := json.NewDecoder(gzipReader) + if err := d.Decode(index); err != nil { + return nil, ErrIndexCorrupted + } + + return index, nil +} + +// WriteIndex uploads the provided index to the storage. +func WriteIndex(ctx context.Context, bkt objstore.Bucket, userID string, idx *Index) error { + bkt = bucket.NewUserBucketClient(userID, bkt) + + // Marshal the index. + content, err := json.Marshal(idx) + if err != nil { + return errors.Wrap(err, "marshal bucket index") + } + + // Compress it. + var gzipContent bytes.Buffer + gzip := gzip.NewWriter(&gzipContent) + gzip.Name = IndexFilename + + if _, err := gzip.Write(content); err != nil { + return errors.Wrap(err, "gzip bucket index") + } + if err := gzip.Close(); err != nil { + return errors.Wrap(err, "close gzip bucket index") + } + + // Upload the index to the storage. + if err := bkt.Upload(ctx, IndexCompressedFilename, &gzipContent); err != nil { + return errors.Wrap(err, "upload bucket index") + } + + return nil +} + +// DeleteIndex deletes the bucket index from the storage. No error is returned if the index +// does not exist. 
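(An illustrative round trip over the ReadIndex, WriteIndex and DeleteIndex helpers in this file, not part of the patch; bkt and logger are assumed to exist.)

func indexRoundTrip(ctx context.Context, bkt objstore.Bucket, logger log.Logger) error {
	idx := &bucketindex.Index{UpdatedAt: time.Now().Unix()} // Version and Blocks elided for brevity

	if err := bucketindex.WriteIndex(ctx, bkt, "tenant-1", idx); err != nil {
		return err
	}
	if _, err := bucketindex.ReadIndex(ctx, bkt, "tenant-1", logger); err != nil {
		return err
	}
	// Deleting a missing index is not an error, so this is safe to call repeatedly.
	return bucketindex.DeleteIndex(ctx, bkt, "tenant-1")
}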
+func DeleteIndex(ctx context.Context, bkt objstore.Bucket, userID string) error { + bkt = bucket.NewUserBucketClient(userID, bkt) + err := bkt.Delete(ctx, IndexCompressedFilename) + if err != nil && !bkt.IsObjNotFoundErr(err) { + return errors.Wrap(err, "delete bucket index") + } + return nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/writer.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/updater.go similarity index 65% rename from vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/writer.go rename to vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/updater.go index 55f37020a9a..4c547deca92 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/writer.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex/updater.go @@ -1,8 +1,6 @@ package bucketindex import ( - "bytes" - "compress/gzip" "context" "encoding/json" "io/ioutil" @@ -19,7 +17,7 @@ import ( "github.com/thanos-io/thanos/pkg/runutil" "github.com/cortexproject/cortex/pkg/storage/bucket" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) var ( @@ -29,56 +27,22 @@ var ( ErrBlockDeletionMarkCorrupted = errors.New("block deletion mark corrupted") ) -// Writer is responsible to generate and write a bucket index. -type Writer struct { +// Updater is responsible for generating an updated in-memory bucket index. +type Updater struct { bkt objstore.InstrumentedBucket logger log.Logger } -func NewWriter(bkt objstore.Bucket, userID string, logger log.Logger) *Writer { - return &Writer{ +func NewUpdater(bkt objstore.Bucket, userID string, logger log.Logger) *Updater { + return &Updater{ bkt: bucket.NewUserBucketClient(userID, bkt), - logger: util.WithUserID(userID, logger), + logger: util_log.WithUserID(userID, logger), } } -// WriteIndex generates the bucket index and writes it to the storage. If the old index is not -// passed in input, then the bucket index will be generated from scratch. -func (w *Writer) WriteIndex(ctx context.Context, old *Index) (*Index, error) { - idx, err := w.GenerateIndex(ctx, old) - if err != nil { - return nil, errors.Wrap(err, "generate bucket index") - } - - // Marshal the index. - content, err := json.Marshal(idx) - if err != nil { - return nil, errors.Wrap(err, "marshal bucket index") - } - - // Compress it. - var gzipContent bytes.Buffer - gzip := gzip.NewWriter(&gzipContent) - gzip.Name = IndexFilename - - if _, err := gzip.Write(content); err != nil { - return nil, errors.Wrap(err, "gzip bucket index") - } - if err := gzip.Close(); err != nil { - return nil, errors.Wrap(err, "close gzip bucket index") - } - - // Upload the index to the storage. - if err := w.bkt.Upload(ctx, IndexCompressedFilename, &gzipContent); err != nil { - return nil, errors.Wrap(err, "upload bucket index") - } - - return idx, nil -} - -// GenerateIndex generates the bucket index and returns it, without storing it to the storage. +// UpdateIndex generates the bucket index and returns it, without storing it to the storage. // If the old index is not passed in input, then the bucket index will be generated from scratch. 
-func (w *Writer) GenerateIndex(ctx context.Context, old *Index) (*Index, error) { +func (w *Updater) UpdateIndex(ctx context.Context, old *Index) (*Index, map[ulid.ULID]error, error) { var oldBlocks []*Block var oldBlockDeletionMarks []*BlockDeletionMark @@ -88,14 +52,14 @@ func (w *Writer) GenerateIndex(ctx context.Context, old *Index) (*Index, error) oldBlockDeletionMarks = old.BlockDeletionMarks } - blocks, err := w.generateBlocksIndex(ctx, oldBlocks) + blocks, partials, err := w.updateBlocks(ctx, oldBlocks) if err != nil { - return nil, err + return nil, nil, err } - blockDeletionMarks, err := w.generateBlockDeletionMarksIndex(ctx, oldBlockDeletionMarks) + blockDeletionMarks, err := w.updateBlockDeletionMarks(ctx, oldBlockDeletionMarks) if err != nil { - return nil, err + return nil, nil, err } return &Index{ @@ -103,12 +67,12 @@ func (w *Writer) GenerateIndex(ctx context.Context, old *Index) (*Index, error) Blocks: blocks, BlockDeletionMarks: blockDeletionMarks, UpdatedAt: time.Now().Unix(), - }, nil + }, partials, nil } -func (w *Writer) generateBlocksIndex(ctx context.Context, old []*Block) ([]*Block, error) { - out := make([]*Block, 0, len(old)) +func (w *Updater) updateBlocks(ctx context.Context, old []*Block) (blocks []*Block, partials map[ulid.ULID]error, _ error) { discovered := map[ulid.ULID]struct{}{} + partials = map[ulid.ULID]error{} // Find all blocks in the storage. err := w.bkt.Iter(ctx, "", func(name string) error { @@ -118,13 +82,13 @@ func (w *Writer) generateBlocksIndex(ctx context.Context, old []*Block) ([]*Bloc return nil }) if err != nil { - return nil, errors.Wrap(err, "list blocks") + return nil, nil, errors.Wrap(err, "list blocks") } // Since blocks are immutable, all blocks already existing in the index can just be copied. for _, b := range old { if _, ok := discovered[b.ID]; ok { - out = append(out, b) + blocks = append(blocks, b) delete(discovered, b.ID) } } @@ -133,26 +97,29 @@ func (w *Writer) generateBlocksIndex(ctx context.Context, old []*Block) ([]*Bloc // to find out if their upload has been completed (meta.json is uploaded last) and get the block // information to store in the bucket index. for id := range discovered { - b, err := w.generateBlockIndexEntry(ctx, id) + b, err := w.updateBlockIndexEntry(ctx, id) + if err == nil { + blocks = append(blocks, b) + continue + } + if errors.Is(err, ErrBlockMetaNotFound) { - level.Warn(w.logger).Log("msg", "skipped partial block when generating bucket index", "block", id.String()) + partials[id] = err + level.Warn(w.logger).Log("msg", "skipped partial block when updating bucket index", "block", id.String()) continue } if errors.Is(err, ErrBlockMetaCorrupted) { - level.Error(w.logger).Log("msg", "skipped block with corrupted meta.json when generating bucket index", "block", id.String(), "err", err) + partials[id] = err + level.Error(w.logger).Log("msg", "skipped block with corrupted meta.json when updating bucket index", "block", id.String(), "err", err) continue } - if err != nil { - return nil, err - } - - out = append(out, b) + return nil, nil, err } - return out, nil + return blocks, partials, nil } -func (w *Writer) generateBlockIndexEntry(ctx context.Context, id ulid.ULID) (*Block, error) { +func (w *Updater) updateBlockIndexEntry(ctx context.Context, id ulid.ULID) (*Block, error) { metaFile := path.Join(id.String(), block.MetaFilename) // Get the block's meta.json file. 
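(A sketch of the intended call pattern for the new UpdateIndex signature, not part of the patch; the surrounding names are illustrative, and oldIdx may be nil to rebuild the index from scratch.)

func rebuildIndex(ctx context.Context, bkt objstore.Bucket, oldIdx *bucketindex.Index, logger log.Logger) error {
	updater := bucketindex.NewUpdater(bkt, "tenant-1", logger)

	idx, partials, err := updater.UpdateIndex(ctx, oldIdx)
	if err != nil {
		return err
	}
	for blockID, reason := range partials {
		// Partial blocks (missing or corrupted meta.json) are excluded from the
		// index but surfaced to the caller instead of being silently dropped.
		level.Warn(logger).Log("msg", "partial block excluded from bucket index", "block", blockID.String(), "err", reason)
	}
	return bucketindex.WriteIndex(ctx, bkt, "tenant-1", idx)
}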
@@ -196,7 +163,7 @@ func (w *Writer) generateBlockIndexEntry(ctx context.Context, id ulid.ULID) (*Bl return block, nil } -func (w *Writer) generateBlockDeletionMarksIndex(ctx context.Context, old []*BlockDeletionMark) ([]*BlockDeletionMark, error) { +func (w *Updater) updateBlockDeletionMarks(ctx context.Context, old []*BlockDeletionMark) ([]*BlockDeletionMark, error) { out := make([]*BlockDeletionMark, 0, len(old)) discovered := map[ulid.ULID]struct{}{} @@ -221,14 +188,14 @@ func (w *Writer) generateBlockDeletionMarksIndex(ctx context.Context, old []*Blo // Remaining markers are new ones and we have to fetch them. for id := range discovered { - m, err := w.generateBlockDeletionMarkIndexEntry(ctx, id) + m, err := w.updateBlockDeletionMarkIndexEntry(ctx, id) if errors.Is(err, ErrBlockDeletionMarkNotFound) { // This could happen if the block is permanently deleted between the "list objects" and now. - level.Warn(w.logger).Log("msg", "skipped missing block deletion mark when generating bucket index", "block", id.String()) + level.Warn(w.logger).Log("msg", "skipped missing block deletion mark when updating bucket index", "block", id.String()) continue } if errors.Is(err, ErrBlockDeletionMarkCorrupted) { - level.Error(w.logger).Log("msg", "skipped corrupted block deletion mark when generating bucket index", "block", id.String(), "err", err) + level.Error(w.logger).Log("msg", "skipped corrupted block deletion mark when updating bucket index", "block", id.String(), "err", err) continue } if err != nil { @@ -241,10 +208,13 @@ func (w *Writer) generateBlockDeletionMarksIndex(ctx context.Context, old []*Blo return out, nil } -func (w *Writer) generateBlockDeletionMarkIndexEntry(ctx context.Context, id ulid.ULID) (*BlockDeletionMark, error) { +func (w *Updater) updateBlockDeletionMarkIndexEntry(ctx context.Context, id ulid.ULID) (*BlockDeletionMark, error) { m := metadata.DeletionMark{} if err := metadata.ReadMarker(ctx, w.logger, w.bkt, id.String(), &m); err != nil { + if errors.Is(err, metadata.ErrorMarkerNotFound) { + return nil, errors.Wrap(ErrBlockDeletionMarkNotFound, err.Error()) + } if errors.Is(err, metadata.ErrorUnmarshalMarker) { return nil, errors.Wrap(ErrBlockDeletionMarkCorrupted, err.Error()) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go index c4550ec899a..caa7cbd1e9f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/caching_bucket.go @@ -3,14 +3,17 @@ package tsdb import ( "flag" "fmt" + "path/filepath" "regexp" "strings" "time" "github.com/go-kit/kit/log" "github.com/golang/snappy" + "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/thanos-io/thanos/pkg/block" "github.com/thanos-io/thanos/pkg/block/metadata" "github.com/thanos-io/thanos/pkg/cache" "github.com/thanos-io/thanos/pkg/cacheutil" @@ -69,14 +72,17 @@ func (cfg *ChunksCacheConfig) Validate() error { type MetadataCacheConfig struct { CacheBackend `yaml:",inline"` - TenantsListTTL time.Duration `yaml:"tenants_list_ttl"` - TenantBlocksListTTL time.Duration `yaml:"tenant_blocks_list_ttl"` - ChunksListTTL time.Duration `yaml:"chunks_list_ttl"` - MetafileExistsTTL time.Duration `yaml:"metafile_exists_ttl"` - MetafileDoesntExistTTL time.Duration `yaml:"metafile_doesnt_exist_ttl"` - MetafileContentTTL time.Duration `yaml:"metafile_content_ttl"` - 
MetafileMaxSize int `yaml:"metafile_max_size_bytes"` - MetafileAttributesTTL time.Duration `yaml:"metafile_attributes_ttl"` + TenantsListTTL time.Duration `yaml:"tenants_list_ttl"` + TenantBlocksListTTL time.Duration `yaml:"tenant_blocks_list_ttl"` + ChunksListTTL time.Duration `yaml:"chunks_list_ttl"` + MetafileExistsTTL time.Duration `yaml:"metafile_exists_ttl"` + MetafileDoesntExistTTL time.Duration `yaml:"metafile_doesnt_exist_ttl"` + MetafileContentTTL time.Duration `yaml:"metafile_content_ttl"` + MetafileMaxSize int `yaml:"metafile_max_size_bytes"` + MetafileAttributesTTL time.Duration `yaml:"metafile_attributes_ttl"` + BlockIndexAttributesTTL time.Duration `yaml:"block_index_attributes_ttl"` + BucketIndexContentTTL time.Duration `yaml:"bucket_index_content_ttl"` + BucketIndexMaxSize int `yaml:"bucket_index_max_size_bytes"` } func (cfg *MetadataCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { @@ -90,8 +96,11 @@ func (cfg *MetadataCacheConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix f.DurationVar(&cfg.MetafileExistsTTL, prefix+"metafile-exists-ttl", 2*time.Hour, "How long to cache information that block metafile exists. Also used for user deletion mark file.") f.DurationVar(&cfg.MetafileDoesntExistTTL, prefix+"metafile-doesnt-exist-ttl", 5*time.Minute, "How long to cache information that block metafile doesn't exist. Also used for user deletion mark file.") f.DurationVar(&cfg.MetafileContentTTL, prefix+"metafile-content-ttl", 24*time.Hour, "How long to cache content of the metafile.") - f.IntVar(&cfg.MetafileMaxSize, prefix+"metafile-max-size-bytes", 1*1024*1024, "Maximum size of metafile content to cache in bytes.") + f.IntVar(&cfg.MetafileMaxSize, prefix+"metafile-max-size-bytes", 1*1024*1024, "Maximum size of metafile content to cache in bytes. Caching will be skipped if the content exceeds this size. This is useful to avoid network round trip for large content if the configured caching backend has a hard limit on cached item size (in this case, you should set this limit to the same limit in the caching backend).") f.DurationVar(&cfg.MetafileAttributesTTL, prefix+"metafile-attributes-ttl", 168*time.Hour, "How long to cache attributes of the block metafile.") + f.DurationVar(&cfg.BlockIndexAttributesTTL, prefix+"block-index-attributes-ttl", 168*time.Hour, "How long to cache attributes of the block index.") + f.DurationVar(&cfg.BucketIndexContentTTL, prefix+"bucket-index-content-ttl", 5*time.Minute, "How long to cache content of the bucket index.") + f.IntVar(&cfg.BucketIndexMaxSize, prefix+"bucket-index-max-size-bytes", 1*1024*1024, "Maximum size of bucket index content to cache in bytes. Caching will be skipped if the content exceeds this size. 
This is useful to avoid network round trip for large content if the configured caching backend has a hard limit on cached item size (in this case, you should set this limit to the same limit in the caching backend).") } func (cfg *MetadataCacheConfig) Validate() error { @@ -123,6 +132,8 @@ func CreateCachingBucket(chunksConfig ChunksCacheConfig, metadataConfig Metadata cfg.CacheExists("metafile", metadataCache, isMetaFile, metadataConfig.MetafileExistsTTL, metadataConfig.MetafileDoesntExistTTL) cfg.CacheGet("metafile", metadataCache, isMetaFile, metadataConfig.MetafileMaxSize, metadataConfig.MetafileContentTTL, metadataConfig.MetafileExistsTTL, metadataConfig.MetafileDoesntExistTTL) cfg.CacheAttributes("metafile", metadataCache, isMetaFile, metadataConfig.MetafileAttributesTTL) + cfg.CacheAttributes("block-index", metadataCache, isBlockIndexFile, metadataConfig.BlockIndexAttributesTTL) + cfg.CacheGet("bucket-index", metadataCache, isBucketIndexFile, metadataConfig.BucketIndexMaxSize, metadataConfig.BucketIndexContentTTL /* do not cache exist / not exist: */, 0, 0) codec := snappyIterCodec{storecache.JSONIterCodec{}} cfg.CacheIter("tenants-iter", metadataCache, isTenantsDir, metadataConfig.TenantsListTTL, codec) @@ -165,6 +176,21 @@ func isMetaFile(name string) bool { return strings.HasSuffix(name, "/"+metadata.MetaFilename) || strings.HasSuffix(name, "/"+metadata.DeletionMarkFilename) || strings.HasSuffix(name, "/"+TenantDeletionMarkPath) } +func isBlockIndexFile(name string) bool { + // Ensure the path ends with "<block id>/index". + if !strings.HasSuffix(name, "/"+block.IndexFilename) { + return false + } + + _, err := ulid.Parse(filepath.Base(filepath.Dir(name))) + return err == nil +} + +func isBucketIndexFile(name string) bool { + // TODO can't reference bucketindex because of a circular dependency. To be fixed. + return strings.HasSuffix(name, "/bucket-index.json.gz") +} + func isTenantsDir(name string) bool { return name == "" } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go index 5105a79d716..11c635cd7d1 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/config.go @@ -50,7 +50,7 @@ var ( //nolint:golint type BlocksStorageConfig struct { Bucket bucket.Config `yaml:",inline"` - BucketStore BucketStoreConfig `yaml:"bucket_store" doc:"description=This configures how the store-gateway synchronizes blocks stored in the bucket."` + BucketStore BucketStoreConfig `yaml:"bucket_store" doc:"description=This configures how the querier and store-gateway discover and synchronize blocks stored in the bucket."` TSDB TSDBConfig `yaml:"tsdb"` } @@ -206,7 +206,12 @@ func (cfg *TSDBConfig) BlocksDir(userID string) string { return filepath.Join(cfg.Dir, userID) } -// BucketStoreConfig holds the config information for Bucket Stores used by the querier +// IsBlocksShippingEnabled returns whether blocks shipping is enabled. +func (cfg *TSDBConfig) IsBlocksShippingEnabled() bool { + return cfg.ShipInterval > 0 +} + +// BucketStoreConfig holds the config information for Bucket Stores used by the querier and store-gateway. 
type BucketStoreConfig struct { SyncDir string `yaml:"sync_dir"` SyncInterval time.Duration `yaml:"sync_interval"` @@ -220,11 +225,11 @@ type BucketStoreConfig struct { ChunksCache ChunksCacheConfig `yaml:"chunks_cache"` MetadataCache MetadataCacheConfig `yaml:"metadata_cache"` IgnoreDeletionMarksDelay time.Duration `yaml:"ignore_deletion_mark_delay"` + BucketIndex BucketIndexConfig `yaml:"bucket_index"` - // Controls whether index-header lazy loading is enabled. This config option is hidden - // while it is marked as experimental. - IndexHeaderLazyLoadingEnabled bool `yaml:"index_header_lazy_loading_enabled" doc:"hidden"` - IndexHeaderLazyLoadingIdleTimeout time.Duration `yaml:"index_header_lazy_loading_idle_timeout" doc:"hidden"` + // Controls whether index-header lazy loading is enabled. + IndexHeaderLazyLoadingEnabled bool `yaml:"index_header_lazy_loading_enabled"` + IndexHeaderLazyLoadingIdleTimeout time.Duration `yaml:"index_header_lazy_loading_idle_timeout"` // Controls what is the ratio of postings offsets store will hold in memory. // Larger value will keep less offsets, which will increase CPU cycles needed for query touching those postings. @@ -239,9 +244,10 @@ func (cfg *BucketStoreConfig) RegisterFlags(f *flag.FlagSet) { cfg.IndexCache.RegisterFlagsWithPrefix(f, "blocks-storage.bucket-store.index-cache.") cfg.ChunksCache.RegisterFlagsWithPrefix(f, "blocks-storage.bucket-store.chunks-cache.") cfg.MetadataCache.RegisterFlagsWithPrefix(f, "blocks-storage.bucket-store.metadata-cache.") + cfg.BucketIndex.RegisterFlagsWithPrefix(f, "blocks-storage.bucket-store.bucket-index.") f.StringVar(&cfg.SyncDir, "blocks-storage.bucket-store.sync-dir", "tsdb-sync", "Directory to store synchronized TSDB index headers.") - f.DurationVar(&cfg.SyncInterval, "blocks-storage.bucket-store.sync-interval", 5*time.Minute, "How frequently scan the bucket to look for changes (new blocks shipped by ingesters and blocks removed by retention or compaction). 0 disables it.") + f.DurationVar(&cfg.SyncInterval, "blocks-storage.bucket-store.sync-interval", 15*time.Minute, "How frequently to scan the bucket, or to refresh the bucket index (if enabled), in order to look for changes (new blocks shipped by ingesters and blocks deleted by retention or compaction).") f.Uint64Var(&cfg.MaxChunkPoolBytes, "blocks-storage.bucket-store.max-chunk-pool-bytes", uint64(2*units.Gibibyte), "Max size - in bytes - of a per-tenant chunk pool, used to reduce memory allocations.") f.IntVar(&cfg.MaxConcurrent, "blocks-storage.bucket-store.max-concurrent", 100, "Max number of concurrent queries to execute against the long-term storage. 
The limit is shared across all tenants.") f.IntVar(&cfg.TenantSyncConcurrency, "blocks-storage.bucket-store.tenant-sync-concurrency", 10, "Maximum number of concurrent tenants syncing blocks.") @@ -272,3 +278,17 @@ func (cfg *BucketStoreConfig) Validate() error { } return nil } + +type BucketIndexConfig struct { + Enabled bool `yaml:"enabled"` + UpdateOnErrorInterval time.Duration `yaml:"update_on_error_interval"` + IdleTimeout time.Duration `yaml:"idle_timeout"` + MaxStalePeriod time.Duration `yaml:"max_stale_period"` +} + +func (cfg *BucketIndexConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { + f.BoolVar(&cfg.Enabled, prefix+"enabled", false, "True to enable the querier and store-gateway to discover blocks in the storage via the bucket index instead of bucket scanning.") + f.DurationVar(&cfg.UpdateOnErrorInterval, prefix+"update-on-error-interval", time.Minute, "How frequently to retry loading a bucket index that previously failed to load. This option is used only by the querier.") + f.DurationVar(&cfg.IdleTimeout, prefix+"idle-timeout", time.Hour, "How long an unused bucket index should be cached. Once this timeout expires, the unused bucket index is removed from the in-memory cache. This option is used only by the querier.") + f.DurationVar(&cfg.MaxStalePeriod, prefix+"max-stale-period", time.Hour, "The maximum allowed age of a bucket index (last updated) before queries start failing because the bucket index is too old. The bucket index is periodically updated by the compactor, while this check is enforced in the querier (at query time).") +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go index 1d4872323ef..5253b1f6628 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storage/tsdb/tenant_deletion_mark.go @@ -7,8 +7,11 @@ import ( "path" "time" + "github.com/go-kit/kit/log/level" "github.com/pkg/errors" "github.com/thanos-io/thanos/pkg/objstore" + + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // Relative to user-specific prefix. @@ -17,6 +20,13 @@ const TenantDeletionMarkPath = "markers/tenant-deletion-mark.json" type TenantDeletionMark struct { // Unix timestamp when deletion marker was created. DeletionTime int64 `json:"deletion_time"` + + // Unix timestamp when cleanup was finished. + FinishedTime int64 `json:"finished_time,omitempty"` +} + +func NewTenantDeletionMark(deletionTime time.Time) *TenantDeletionMark { + return &TenantDeletionMark{DeletionTime: deletionTime.Unix()} } // Checks for a deletion mark for the tenant. Errors other than "object not found" are returned. @@ -27,10 +37,8 @@ func TenantDeletionMarkExists(ctx context.Context, bkt objstore.BucketReader, us } // Uploads the deletion mark to the tenant "directory". 
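(An illustrative write/read pair for the tenant deletion mark helpers, not part of the patch; bkt and the tenant ID are assumed.)

func markTenantDeleted(ctx context.Context, bkt objstore.Bucket) error {
	mark := tsdb.NewTenantDeletionMark(time.Now())
	if err := tsdb.WriteTenantDeletionMark(ctx, bkt, "tenant-1", mark); err != nil {
		return err
	}

	// ReadTenantDeletionMark returns a nil mark and nil error when no mark exists.
	readBack, err := tsdb.ReadTenantDeletionMark(ctx, bkt, "tenant-1")
	if err != nil {
		return err
	}
	if readBack != nil {
		fmt.Println("tenant marked for deletion at", time.Unix(readBack.DeletionTime, 0))
	}
	return nil
}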
-func WriteTenantDeletionMark(ctx context.Context, bkt objstore.Bucket, userID string) error { - m := &TenantDeletionMark{DeletionTime: time.Now().Unix()} - - data, err := json.Marshal(m) +func WriteTenantDeletionMark(ctx context.Context, bkt objstore.Bucket, userID string, mark *TenantDeletionMark) error { + data, err := json.Marshal(mark) if err != nil { return errors.Wrap(err, "serialize tenant deletion mark") } @@ -38,3 +46,31 @@ func WriteTenantDeletionMark(ctx context.Context, bkt objstore.Bucket, userID st markerFile := path.Join(userID, TenantDeletionMarkPath) return errors.Wrap(bkt.Upload(ctx, markerFile, bytes.NewReader(data)), "upload tenant deletion mark") } + +// Returns the tenant deletion mark for the given user, if it exists. If it doesn't exist, returns a nil mark and no error. +func ReadTenantDeletionMark(ctx context.Context, bkt objstore.BucketReader, userID string) (*TenantDeletionMark, error) { + markerFile := path.Join(userID, TenantDeletionMarkPath) + + r, err := bkt.Get(ctx, markerFile) + if err != nil { + if bkt.IsObjNotFoundErr(err) { + return nil, nil + } + + return nil, errors.Wrapf(err, "failed to read deletion mark object: %s", markerFile) + } + + mark := &TenantDeletionMark{} + err = json.NewDecoder(r).Decode(mark) + + // Close reader before dealing with decode error. + if closeErr := r.Close(); closeErr != nil { + level.Warn(util_log.Logger).Log("msg", "failed to close bucket reader", "err", closeErr) + } + + if err != nil { + return nil, errors.Wrapf(err, "failed to decode deletion mark object: %s", markerFile) + } + + return mark, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_index_metadata_fetcher.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_index_metadata_fetcher.go new file mode 100644 index 00000000000..8491b601daf --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_index_metadata_fetcher.go @@ -0,0 +1,236 @@ +package storegateway + +import ( + "context" + "time" + + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" + "github.com/oklog/ulid" + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/thanos-io/thanos/pkg/block" + "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/extprom" + "github.com/thanos-io/thanos/pkg/objstore" + + "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" +) + +// BucketIndexMetadataFetcher is a Thanos MetadataFetcher implementation leveraging the Cortex bucket index. +type BucketIndexMetadataFetcher struct { + userID string + bkt objstore.Bucket + strategy ShardingStrategy + logger log.Logger + filters []block.MetadataFilter + modifiers []block.MetadataModifier + metrics *fetcherMetrics +} + +func NewBucketIndexMetadataFetcher( + userID string, + bkt objstore.Bucket, + strategy ShardingStrategy, + logger log.Logger, + reg prometheus.Registerer, + filters []block.MetadataFilter, + modifiers []block.MetadataModifier, +) *BucketIndexMetadataFetcher { + return &BucketIndexMetadataFetcher{ + userID: userID, + bkt: bkt, + strategy: strategy, + logger: logger, + filters: filters, + modifiers: modifiers, + metrics: newFetcherMetrics(reg), + } +} + +// Fetch implements metadata.MetadataFetcher. 
+func (f *BucketIndexMetadataFetcher) Fetch(ctx context.Context) (metas map[ulid.ULID]*metadata.Meta, partial map[ulid.ULID]error, err error) { + f.metrics.resetTx() + + // Check whether the user belongs to the shard. + if len(f.strategy.FilterUsers(ctx, []string{f.userID})) != 1 { + f.metrics.submit() + return nil, nil, nil + } + + // Track duration and sync counters only if the user wasn't filtered out by the sharding strategy. + start := time.Now() + defer func() { + f.metrics.syncDuration.Observe(time.Since(start).Seconds()) + if err != nil { + f.metrics.syncFailures.Inc() + } + }() + f.metrics.syncs.Inc() + + // Fetch the bucket index. + idx, err := bucketindex.ReadIndex(ctx, f.bkt, f.userID, f.logger) + if errors.Is(err, bucketindex.ErrIndexNotFound) { + // This is a legit case happening when the first blocks of a tenant have recently been uploaded by ingesters + // and their bucket index has not been created yet. + f.metrics.synced.WithLabelValues(noBucketIndex).Set(1) + f.metrics.submit() + + return nil, nil, nil + } + if errors.Is(err, bucketindex.ErrIndexCorrupted) { + // In case a single tenant bucket index is corrupted, we don't want the store-gateway to fail at startup + // because it's unable to fetch blocks metadata. We'll act as if the tenant has no bucket index, but the query + // will fail anyway in the querier (the querier fails if the bucket index is corrupted). + level.Error(f.logger).Log("msg", "corrupted bucket index found", "user", f.userID, "err", err) + f.metrics.synced.WithLabelValues(corruptedBucketIndex).Set(1) + f.metrics.submit() + + return nil, nil, nil + } + if err != nil { + f.metrics.synced.WithLabelValues(failedMeta).Set(1) + f.metrics.submit() + + return nil, nil, errors.Wrapf(err, "read bucket index") + } + + // Build block metas out of the index. + metas = make(map[ulid.ULID]*metadata.Meta, len(idx.Blocks)) + for _, b := range idx.Blocks { + metas[b.ID] = b.ThanosMeta(f.userID) + } + + for _, filter := range f.filters { + var err error + + // NOTE: the filter can update the synced metric according to the reason for the exclusion. + if customFilter, ok := filter.(MetadataFilterWithBucketIndex); ok { + err = customFilter.FilterWithBucketIndex(ctx, metas, idx, f.metrics.synced) + } else { + err = filter.Filter(ctx, metas, f.metrics.synced) + } + + if err != nil { + return nil, nil, errors.Wrap(err, "filter metas") + } + } + + for _, m := range f.modifiers { + // NOTE: the modifier can update the modified metric according to the reason for the modification. + if err := m.Modify(ctx, metas, f.metrics.modified); err != nil { + return nil, nil, errors.Wrap(err, "modify metas") + } + } + + f.metrics.synced.WithLabelValues(loadedMeta).Set(float64(len(metas))) + f.metrics.submit() + + return metas, nil, nil +} + +func (f *BucketIndexMetadataFetcher) UpdateOnChange(callback func([]metadata.Meta, error)) { + // Unused by the store-gateway. + callback(nil, errors.New("UpdateOnChange is unsupported")) +} + +const ( + fetcherSubSys = "blocks_meta" + + corruptedMeta = "corrupted-meta-json" + noMeta = "no-meta-json" + loadedMeta = "loaded" + failedMeta = "failed" + corruptedBucketIndex = "corrupted-bucket-index" + noBucketIndex = "no-bucket-index" + + // Synced label values. + labelExcludedMeta = "label-excluded" + timeExcludedMeta = "time-excluded" + tooFreshMeta = "too-fresh" + duplicateMeta = "duplicate" + // Blocks that are marked for deletion can be loaded as well. 
This is done to make sure that we load blocks that are meant to be deleted, + // but don't have a replacement block yet. + markedForDeletionMeta = "marked-for-deletion" + + // MarkedForNoCompactionMeta is label for blocks which are loaded but also marked for no compaction. This label is also counted in `loaded` label metric. + MarkedForNoCompactionMeta = "marked-for-no-compact" + + // Modified label values. + replicaRemovedMeta = "replica-label-removed" +) + +// fetcherMetrics is a copy of Thanos internal fetcherMetrics. These metrics have been copied from +// Thanos in order to track the same exact metrics in our own custom metadata fetcher implementation. +type fetcherMetrics struct { + syncs prometheus.Counter + syncFailures prometheus.Counter + syncDuration prometheus.Histogram + + synced *extprom.TxGaugeVec + modified *extprom.TxGaugeVec +} + +func newFetcherMetrics(reg prometheus.Registerer) *fetcherMetrics { + var m fetcherMetrics + + m.syncs = promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Subsystem: fetcherSubSys, + Name: "syncs_total", + Help: "Total blocks metadata synchronization attempts", + }) + m.syncFailures = promauto.With(reg).NewCounter(prometheus.CounterOpts{ + Subsystem: fetcherSubSys, + Name: "sync_failures_total", + Help: "Total blocks metadata synchronization failures", + }) + m.syncDuration = promauto.With(reg).NewHistogram(prometheus.HistogramOpts{ + Subsystem: fetcherSubSys, + Name: "sync_duration_seconds", + Help: "Duration of the blocks metadata synchronization in seconds", + Buckets: []float64{0.01, 1, 10, 100, 1000}, + }) + m.synced = extprom.NewTxGaugeVec( + reg, + prometheus.GaugeOpts{ + Subsystem: fetcherSubSys, + Name: "synced", + Help: "Number of block metadata synced", + }, + []string{"state"}, + []string{corruptedMeta}, + []string{corruptedBucketIndex}, + []string{noMeta}, + []string{noBucketIndex}, + []string{loadedMeta}, + []string{tooFreshMeta}, + []string{failedMeta}, + []string{labelExcludedMeta}, + []string{timeExcludedMeta}, + []string{duplicateMeta}, + []string{markedForDeletionMeta}, + []string{MarkedForNoCompactionMeta}, + ) + m.modified = extprom.NewTxGaugeVec( + reg, + prometheus.GaugeOpts{ + Subsystem: fetcherSubSys, + Name: "modified", + Help: "Number of blocks whose metadata changed", + }, + []string{"modified"}, + []string{replicaRemovedMeta}, + ) + return &m +} + +func (s *fetcherMetrics) submit() { + s.synced.Submit() + s.modified.Submit() +} + +func (s *fetcherMetrics) resetTx() { + s.synced.ResetTx() + s.modified.ResetTx() +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go index cecd68e1971..392689ed1ce 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/bucket_stores.go @@ -28,7 +28,7 @@ import ( "github.com/cortexproject/cortex/pkg/storage/bucket" "github.com/cortexproject/cortex/pkg/storage/tsdb" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/spanlogger" "github.com/cortexproject/cortex/pkg/util/validation" ) @@ -324,50 +324,69 @@ func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, erro return bs, nil } - userLogger := util.WithUserID(userID, u.logger) + userLogger := util_log.WithUserID(userID, u.logger) level.Info(userLogger).Log("msg", "creating user bucket store") userBkt := 
bucket.NewUserBucketClient(userID, u.bucket) + fetcherReg := prometheus.NewRegistry() - // Wrap the bucket reader to skip iterating the bucket at all if the user doesn't - // belong to the store-gateway shard. We need to run the BucketStore synching anyway - // in order to unload previous tenants in case of a resharding leading to tenants - // moving out from the store-gateway shard and also make sure both MetaFetcher and - // BucketStore metrics are correctly updated. - fetcherBkt := NewShardingBucketReaderAdapter(userID, u.shardingStrategy, userBkt) + // The sharding strategy filter MUST be before the ones we create here (order matters). + filters := append([]block.MetadataFilter{NewShardingMetadataFilterAdapter(userID, u.shardingStrategy)}, []block.MetadataFilter{ + block.NewConsistencyDelayMetaFilter(userLogger, u.cfg.BucketStore.ConsistencyDelay, fetcherReg), + // Use our own custom implementation. + NewIgnoreDeletionMarkFilter(userLogger, userBkt, u.cfg.BucketStore.IgnoreDeletionMarksDelay, u.cfg.BucketStore.MetaSyncConcurrency), + // The duplicate filter has been intentionally omitted because it could cause troubles with + // the consistency check done on the querier. The duplicate filter removes redundant blocks + // but if the store-gateway removes redundant blocks before the querier discovers them, the + // consistency check on the querier will fail. + }...) + + modifiers := []block.MetadataModifier{ + // Remove Cortex external labels so that they're not injected when querying blocks. + NewReplicaLabelRemover(userLogger, []string{ + tsdb.TenantIDExternalLabel, + tsdb.IngesterIDExternalLabel, + tsdb.ShardIDExternalLabel, + }), + } - fetcherReg := prometheus.NewRegistry() - fetcher, err := block.NewMetaFetcher( - userLogger, - u.cfg.BucketStore.MetaSyncConcurrency, - fetcherBkt, - filepath.Join(u.cfg.BucketStore.SyncDir, userID), // The fetcher stores cached metas in the "meta-syncer/" sub directory - fetcherReg, - // The sharding strategy filter MUST be before the ones we create here (order matters). - append([]block.MetadataFilter{NewShardingMetadataFilterAdapter(userID, u.shardingStrategy)}, []block.MetadataFilter{ - block.NewConsistencyDelayMetaFilter(userLogger, u.cfg.BucketStore.ConsistencyDelay, fetcherReg), - block.NewIgnoreDeletionMarkFilter(userLogger, userBkt, u.cfg.BucketStore.IgnoreDeletionMarksDelay, u.cfg.BucketStore.MetaSyncConcurrency), - // The duplicate filter has been intentionally omitted because it could cause troubles with - // the consistency check done on the querier. The duplicate filter removes redundant blocks - // but if the store-gateway removes redundant blocks before the querier discovers them, the - // consistency check on the querier will fail. - }...), - []block.MetadataModifier{ - // Remove Cortex external labels so that they're not injected when querying blocks. - NewReplicaLabelRemover(userLogger, []string{ - tsdb.TenantIDExternalLabel, - tsdb.IngesterIDExternalLabel, - tsdb.ShardIDExternalLabel, - }), - }, - ) - if err != nil { - return nil, err + // Instantiate a different blocks metadata fetcher based on whether bucket index is enabled or not. + var fetcher block.MetadataFetcher + if u.cfg.BucketStore.BucketIndex.Enabled { + fetcher = NewBucketIndexMetadataFetcher( + userID, + u.bucket, + u.shardingStrategy, + u.logger, + fetcherReg, + filters, + modifiers) + } else { + // Wrap the bucket reader to skip iterating the bucket at all if the user doesn't + // belong to the store-gateway shard. 
We need to run the BucketStore synching anyway + // in order to unload previous tenants in case of a resharding leading to tenants + // moving out from the store-gateway shard and also make sure both MetaFetcher and + // BucketStore metrics are correctly updated. + fetcherBkt := NewShardingBucketReaderAdapter(userID, u.shardingStrategy, userBkt) + + var err error + fetcher, err = block.NewMetaFetcher( + userLogger, + u.cfg.BucketStore.MetaSyncConcurrency, + fetcherBkt, + filepath.Join(u.cfg.BucketStore.SyncDir, userID), // The fetcher stores cached metas in the "meta-syncer/" sub directory + fetcherReg, + filters, + modifiers, + ) + if err != nil { + return nil, err + } } bucketStoreReg := prometheus.NewRegistry() - bs, err = store.NewBucketStore( + bs, err := store.NewBucketStore( userLogger, bucketStoreReg, userBkt, @@ -377,7 +396,8 @@ func (u *BucketStores) getOrCreateStore(userID string) (*store.BucketStore, erro u.queryGate, u.cfg.BucketStore.MaxChunkPoolBytes, newChunksLimiterFactory(u.limits, userID), - u.logLevel.String() == "debug", // Turn on debug logging, if the log level is set to debug + store.NewSeriesLimiterFactory(0), // No series limiter. + u.logLevel.String() == "debug", // Turn on debug logging, if the log level is set to debug u.cfg.BucketStore.BlockSyncConcurrency, nil, // Do not limit timerange. false, // No need to enable backward compatibility with Thanos pre 0.8.0 queriers diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go index f94e08d43f9..db7716a5b59 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway.go @@ -163,7 +163,7 @@ func newStoreGateway(gatewayCfg Config, storageCfg cortex_tsdb.BlocksStorageConf } ringCfg := gatewayCfg.ShardingRing.ToRingConfig() - g.ring, err = ring.NewWithStoreClientAndStrategy(ringCfg, RingNameForServer, RingKey, ringStore, &BlocksReplicationStrategy{}) + g.ring, err = ring.NewWithStoreClientAndStrategy(ringCfg, RingNameForServer, RingKey, ringStore, ring.NewIgnoreUnhealthyInstancesReplicationStrategy()) if err != nil { return nil, errors.Wrap(err, "create ring client") } @@ -271,7 +271,7 @@ func (g *StoreGateway) running(ctx context.Context) error { defer syncTicker.Stop() if g.gatewayCfg.ShardingEnabled { - ringLastState, _ = g.ring.GetAllHealthy(ring.BlocksSync) // nolint:errcheck + ringLastState, _ = g.ring.GetAllHealthy(BlocksSync) // nolint:errcheck ringTicker := time.NewTicker(util.DurationWithJitter(g.gatewayCfg.ShardingRing.RingCheckPeriod, 0.2)) defer ringTicker.Stop() ringTickerChan = ringTicker.C @@ -284,7 +284,7 @@ func (g *StoreGateway) running(ctx context.Context) error { case <-ringTickerChan: // We ignore the error because in case of error it will return an empty // replication set which we use to compare with the previous state. 
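The ticker-driven ring comparison in running() generalizes to any poll-for-change loop: snapshot the state, compare against the previous snapshot, and only do the expensive work when something changed. A self-contained sketch of the pattern, using reflect.DeepEqual where the real code uses ring.HasReplicationSetChanged:

package main

import (
	"fmt"
	"reflect"
	"time"
)

// pollForChanges re-runs onChange only when the observed state differs from
// the last snapshot, the same pattern the store-gateway applies to the ring.
func pollForChanges(snapshot func() []string, onChange func(), every time.Duration, stop <-chan struct{}) {
	last := snapshot()
	t := time.NewTicker(every)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			if curr := snapshot(); !reflect.DeepEqual(last, curr) {
				last = curr
				onChange()
			}
		case <-stop:
			return
		}
	}
}

func main() {
	stop := make(chan struct{})
	go pollForChanges(
		func() []string { return []string{"gw-1", "gw-2"} }, // constant state: never fires
		func() { fmt.Println("ring changed, resyncing") },
		100*time.Millisecond, stop)
	time.Sleep(350 * time.Millisecond)
	close(stop)
}
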
- currRingState, _ := g.ring.GetAllHealthy(ring.BlocksSync) // nolint:errcheck + currRingState, _ := g.ring.GetAllHealthy(BlocksSync) // nolint:errcheck if ring.HasReplicationSetChanged(ringLastState, currRingState) { ringLastState = currRingState @@ -330,7 +330,7 @@ func (g *StoreGateway) LabelValues(ctx context.Context, req *storepb.LabelValues return g.stores.LabelValues(ctx, req) } -func (g *StoreGateway) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.IngesterDesc) (ring.IngesterState, ring.Tokens) { +func (g *StoreGateway) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.IngesterState, ring.Tokens) { // When we initialize the store-gateway instance in the ring we want to start from // a clean situation, so whatever is the state we set it JOINING, while we keep existing // tokens (if any) or the ones loaded from file. @@ -339,7 +339,7 @@ func (g *StoreGateway) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc tokens = instanceDesc.GetTokens() } - _, takenTokens := ringDesc.TokensFor(instanceID) + takenTokens := ringDesc.GetTokens() newTokens := ring.GenerateTokens(RingNumTokens-len(tokens), takenTokens) // Tokens sorting will be enforced by the parent caller. @@ -350,7 +350,7 @@ func (g *StoreGateway) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc func (g *StoreGateway) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {} func (g *StoreGateway) OnRingInstanceStopping(_ *ring.BasicLifecycler) {} -func (g *StoreGateway) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.IngesterDesc) { +func (g *StoreGateway) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) { } func createBucketClient(cfg cortex_tsdb.BlocksStorageConfig, logger log.Logger, reg prometheus.Registerer) (objstore.Bucket, error) { diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go index 01d466b4558..22ce3ebea64 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_http.go @@ -6,7 +6,7 @@ import ( "github.com/go-kit/kit/log/level" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -32,7 +32,7 @@ func writeMessage(w http.ResponseWriter, message string) { }{Message: message}) if err != nil { - level.Error(util.Logger).Log("msg", "unable to serve store gateway ring page", "err", err) + level.Error(util_log.Logger).Log("msg", "unable to serve store gateway ring page", "err", err) } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go index 2fcb9b2faae..8f7f30bc87d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/gateway_ring.go @@ -10,8 +10,8 @@ import ( "github.com/cortexproject/cortex/pkg/ring" "github.com/cortexproject/cortex/pkg/ring/kv" - "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/flagext" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) const ( @@ -30,6 +30,20 @@ const ( RingNumTokens = 512 ) +var ( + // 
BlocksSync is the operation run by the store-gateway to sync blocks. + BlocksSync = ring.NewOp([]ring.IngesterState{ring.JOINING, ring.ACTIVE, ring.LEAVING}, func(s ring.IngesterState) bool { + // If the instance is JOINING or LEAVING we should extend the replica set: + // - JOINING: the previous replica set should be kept while an instance is JOINING + // - LEAVING: the instance is going to be decommissioned soon so we need to include + // another replica in the set + return s == ring.JOINING || s == ring.LEAVING + }) + + // BlocksRead is the operation run by the querier to query blocks via the store-gateway. + BlocksRead = ring.NewOp([]ring.IngesterState{ring.ACTIVE}, nil) +) + // RingConfig masks the ring lifecycler config which contains // many options not really required by the store gateways ring. This config // is used to strip down the config to the minimum, and avoid confusion @@ -58,7 +72,7 @@ type RingConfig struct { func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet) { hostname, err := os.Hostname() if err != nil { - level.Error(util.Logger).Log("msg", "failed to get hostname", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err) os.Exit(1) } @@ -92,6 +106,7 @@ func (cfg *RingConfig) ToRingConfig() ring.Config { rc.HeartbeatTimeout = cfg.HeartbeatTimeout rc.ReplicationFactor = cfg.ReplicationFactor rc.ZoneAwarenessEnabled = cfg.ZoneAwarenessEnabled + rc.SubringCacheDisabled = true return rc } diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_filters.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_filters.go new file mode 100644 index 00000000000..7bd8693dd48 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_filters.go @@ -0,0 +1,78 @@ +package storegateway + +import ( + "context" + "time" + + "github.com/go-kit/kit/log" + "github.com/oklog/ulid" + "github.com/thanos-io/thanos/pkg/block" + "github.com/thanos-io/thanos/pkg/block/metadata" + "github.com/thanos-io/thanos/pkg/extprom" + "github.com/thanos-io/thanos/pkg/objstore" + + "github.com/cortexproject/cortex/pkg/storage/tsdb/bucketindex" +) + +type MetadataFilterWithBucketIndex interface { + // FilterWithBucketIndex is like Thanos MetadataFilter.Filter() but it provides in input the bucket index too. + FilterWithBucketIndex(ctx context.Context, metas map[ulid.ULID]*metadata.Meta, idx *bucketindex.Index, synced *extprom.TxGaugeVec) error +} + +// IgnoreDeletionMarkFilter is like the Thanos IgnoreDeletionMarkFilter, but it also implements +// the MetadataFilterWithBucketIndex interface. +type IgnoreDeletionMarkFilter struct { + upstream *block.IgnoreDeletionMarkFilter + + delay time.Duration + deletionMarkMap map[ulid.ULID]*metadata.DeletionMark +} + +// NewIgnoreDeletionMarkFilter creates IgnoreDeletionMarkFilter. +func NewIgnoreDeletionMarkFilter(logger log.Logger, bkt objstore.InstrumentedBucketReader, delay time.Duration, concurrency int) *IgnoreDeletionMarkFilter { + return &IgnoreDeletionMarkFilter{ + upstream: block.NewIgnoreDeletionMarkFilter(logger, bkt, delay, concurrency), + delay: delay, + } +} + +// DeletionMarkBlocks returns blocks that were marked for deletion. +func (f *IgnoreDeletionMarkFilter) DeletionMarkBlocks() map[ulid.ULID]*metadata.DeletionMark { + // If the cached deletion marks exist it means the filter function was called with the bucket + // index, so it's safe to return it. 
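The BlocksSync/BlocksRead definitions above replace the deleted replication strategy's switch statement with data: an operation is a set of states considered healthy plus a predicate deciding when to extend the replica set. A simplified model of that idea (the real ring.Op packs this more compactly; this is not the Cortex implementation):

package main

import "fmt"

type state int

const (
	joining state = iota
	active
	leaving
)

// op models ring.NewOp: which instance states count as healthy for this
// operation, and when the replica set should grow by one.
type op struct {
	healthy      map[state]bool
	shouldExtend func(state) bool
}

func newOp(states []state, extend func(state) bool) op {
	h := map[state]bool{}
	for _, s := range states {
		h[s] = true
	}
	return op{healthy: h, shouldExtend: extend}
}

func main() {
	blocksSync := newOp([]state{joining, active, leaving}, func(s state) bool {
		// Keep the previous replica while JOINING, add a replacement while LEAVING.
		return s == joining || s == leaving
	})
	fmt.Println(blocksSync.healthy[active], blocksSync.shouldExtend(leaving)) // true true
}
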
+ if f.deletionMarkMap != nil { + return f.deletionMarkMap + } + + return f.upstream.DeletionMarkBlocks() +} + +// Filter implements block.MetadataFilter. +func (f *IgnoreDeletionMarkFilter) Filter(ctx context.Context, metas map[ulid.ULID]*metadata.Meta, synced *extprom.TxGaugeVec) error { + return f.upstream.Filter(ctx, metas, synced) +} + +// FilterWithBucketIndex implements MetadataFilterWithBucketIndex. +func (f *IgnoreDeletionMarkFilter) FilterWithBucketIndex(_ context.Context, metas map[ulid.ULID]*metadata.Meta, idx *bucketindex.Index, synced *extprom.TxGaugeVec) error { + // Build a map of block deletion marks + marks := make(map[ulid.ULID]*metadata.DeletionMark, len(idx.BlockDeletionMarks)) + for _, mark := range idx.BlockDeletionMarks { + marks[mark.ID] = mark.ThanosDeletionMark() + } + + // Keep it cached. + f.deletionMarkMap = marks + + for _, mark := range marks { + if _, ok := metas[mark.ID]; !ok { + continue + } + + if time.Since(time.Unix(mark.DeletionTime, 0)).Seconds() > f.delay.Seconds() { + synced.WithLabelValues(markedForDeletionMeta).Inc() + delete(metas, mark.ID) + } + } + + return nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_metrics.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_metrics.go index 113dd616235..65391db0c38 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_metrics.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/metadata_fetcher_metrics.go @@ -27,6 +27,8 @@ func NewMetadataFetcherMetrics() *MetadataFetcherMetrics { return &MetadataFetcherMetrics{ regs: util.NewUserRegistries(), + // When mapping new metadata fetcher metrics from Thanos, please remember to add these metrics + // to our internal fetcherMetrics implementation too. syncs: prometheus.NewDesc( "cortex_blocks_meta_syncs_total", "Total blocks metadata synchronization attempts", diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/replication_strategy.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/replication_strategy.go deleted file mode 100644 index db978bdcd4f..00000000000 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/replication_strategy.go +++ /dev/null @@ -1,45 +0,0 @@ -package storegateway - -import ( - "errors" - "time" - - "github.com/cortexproject/cortex/pkg/ring" -) - -type BlocksReplicationStrategy struct{} - -func (s *BlocksReplicationStrategy) Filter(instances []ring.IngesterDesc, op ring.Operation, _ int, heartbeatTimeout time.Duration, _ bool) ([]ring.IngesterDesc, int, error) { - // Filter out unhealthy instances. - for i := 0; i < len(instances); { - if instances[i].IsHealthy(op, heartbeatTimeout) { - i++ - } else { - instances = append(instances[:i], instances[i+1:]...) - } - } - - // For the store-gateway use case we need that a block is loaded at least on - // 1 instance, no matter what is the replication factor set (no quorum logic). 
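The strategy being deleted here used the classic in-place slice filter before the zero-length check that follows. A standalone sketch of that idiom, with a plain boolean field standing in for the real IsHealthy(op, timeout) check:

package main

import "fmt"

type instance struct {
	addr    string
	healthy bool
}

// filterHealthy removes unhealthy instances in place, the same idiom the
// removed BlocksReplicationStrategy.Filter used: only advance the index
// when the current element is kept.
func filterHealthy(instances []instance) []instance {
	for i := 0; i < len(instances); {
		if instances[i].healthy {
			i++
		} else {
			instances = append(instances[:i], instances[i+1:]...)
		}
	}
	return instances
}

func main() {
	in := []instance{{"gw-1", true}, {"gw-2", false}, {"gw-3", true}}
	out := filterHealthy(in)
	fmt.Println(len(out), out[0].addr, out[1].addr) // 2 gw-1 gw-3
}
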
- if len(instances) == 0 { - return nil, 0, errors.New("no healthy store-gateway instance found for the replication set") - } - - maxFailures := len(instances) - 1 - return instances, maxFailures, nil -} - -func (s *BlocksReplicationStrategy) ShouldExtendReplicaSet(instance ring.IngesterDesc, op ring.Operation) bool { - switch op { - case ring.BlocksSync: - // If the instance is JOINING or LEAVING we should extend the replica set: - // - JOINING: the previous replica set should be kept while an instance is JOINING - // - LEAVING: the instance is going to be decommissioned soon so we need to include - // another replica in the set - return instance.GetState() == ring.JOINING || instance.GetState() == ring.LEAVING - case ring.BlocksRead: - return false - default: - return false - } -} diff --git a/vendor/github.com/cortexproject/cortex/pkg/storegateway/sharding_strategy.go b/vendor/github.com/cortexproject/cortex/pkg/storegateway/sharding_strategy.go index 9945735db4d..6be0c338c5d 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/storegateway/sharding_strategy.go +++ b/vendor/github.com/cortexproject/cortex/pkg/storegateway/sharding_strategy.go @@ -122,12 +122,11 @@ func (s *ShuffleShardingStrategy) FilterBlocks(_ context.Context, userID string, } func filterBlocksByRingSharding(r ring.ReadRing, instanceAddr string, metas map[ulid.ULID]*metadata.Meta, synced *extprom.TxGaugeVec, logger log.Logger) { - // Buffer internally used by the ring (give extra room for a JOINING + LEAVING instance). - buf := make([]ring.IngesterDesc, 0, r.ReplicationFactor()+2) + bufDescs, bufHosts, bufZones := ring.MakeBuffersForGet() for blockID := range metas { key := cortex_tsdb.HashBlockID(blockID) - set, err := r.Get(key, ring.BlocksSync, buf) + set, err := r.Get(key, BlocksSync, bufDescs, bufHosts, bufZones) // If there are no healthy instances in the replication set or // the replication set for this block doesn't include this instance diff --git a/vendor/github.com/cortexproject/cortex/pkg/tenant/tenant.go b/vendor/github.com/cortexproject/cortex/pkg/tenant/tenant.go index 102091c78b6..fa808989077 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/tenant/tenant.go +++ b/vendor/github.com/cortexproject/cortex/pkg/tenant/tenant.go @@ -1,9 +1,13 @@ package tenant import ( + "context" "errors" "fmt" "sort" + "strings" + + "github.com/weaveworks/common/user" ) var ( @@ -64,6 +68,10 @@ func ValidTenantID(s string) error { return nil } +func JoinTenantIDs(tenantIDs []string) string { + return strings.Join(tenantIDs, tenantIDsLabelSeparator) +} + // this checks if a rune is supported in tenant IDs (according to // https://cortexmetrics.io/docs/guides/limitations/#tenant-id-naming) func isSupported(c rune) bool { @@ -87,3 +95,11 @@ func isSupported(c rune) bool { c == '(' || c == ')' } + +// TenantIDsFromOrgID extracts different tenants from an orgID string value +// +// ignore stutter warning +//nolint:golint +func TenantIDsFromOrgID(orgID string) ([]string, error) { + return TenantIDs(user.InjectOrgID(context.TODO(), orgID)) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/active_user.go b/vendor/github.com/cortexproject/cortex/pkg/util/active_user.go new file mode 100644 index 00000000000..72a880a67d8 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/active_user.go @@ -0,0 +1,91 @@ +package util + +import ( + "sync" + + "go.uber.org/atomic" +) + +// ActiveUsers keeps track of latest user's activity timestamp, +// and allows purging users that are no longer active. 
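JoinTenantIDs above is the inverse of TenantIDs/TenantIDsFromOrgID: a multi-tenant org ID is just the tenant IDs joined by a separator. A round-trip sketch, assuming "|" as the separator (the actual tenantIDsLabelSeparator value is not shown in this hunk, so treat it as an assumption):

package main

import (
	"fmt"
	"strings"
)

// sep is an assumed stand-in for tenantIDsLabelSeparator.
const sep = "|"

func join(ids []string) string  { return strings.Join(ids, sep) }
func split(orgID string) []string { return strings.Split(orgID, sep) }

func main() {
	orgID := join([]string{"team-a", "team-b"})
	fmt.Println(orgID)        // team-a|team-b
	fmt.Println(split(orgID)) // [team-a team-b]
}
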
+type ActiveUsers struct { + mu sync.RWMutex + timestamps map[string]*atomic.Int64 // As long as unit used by Update and Purge is the same, it doesn't matter what it is. +} + +func NewActiveUsers() *ActiveUsers { + return &ActiveUsers{ + timestamps: map[string]*atomic.Int64{}, + } +} + +func (m *ActiveUsers) UpdateUserTimestamp(userID string, ts int64) { + m.mu.RLock() + u := m.timestamps[userID] + m.mu.RUnlock() + + if u != nil { + u.Store(ts) + return + } + + // Pre-allocate new atomic to avoid doing allocation with lock held. + newAtomic := atomic.NewInt64(ts) + + // We need RW lock to create new entry. + m.mu.Lock() + u = m.timestamps[userID] + + if u != nil { + // Unlock first to reduce contention. + m.mu.Unlock() + + u.Store(ts) + return + } + + m.timestamps[userID] = newAtomic + m.mu.Unlock() +} + +// PurgeInactiveUsers removes users that were last active before given deadline, and returns removed users. +func (m *ActiveUsers) PurgeInactiveUsers(deadline int64) []string { + // Find inactive users with read-lock. + m.mu.RLock() + inactive := make([]string, 0, len(m.timestamps)) + + for userID, ts := range m.timestamps { + if ts.Load() <= deadline { + inactive = append(inactive, userID) + } + } + m.mu.RUnlock() + + if len(inactive) == 0 { + return nil + } + + // Cleanup inactive users. + for ix := 0; ix < len(inactive); { + userID := inactive[ix] + deleted := false + + m.mu.Lock() + u := m.timestamps[userID] + if u != nil && u.Load() <= deadline { + delete(m.timestamps, userID) + deleted = true + } + m.mu.Unlock() + + if deleted { + // keep it in the output + ix++ + } else { + // not really inactive, remove it from output + inactive = append(inactive[:ix], inactive[ix+1:]...) + } + } + + return inactive +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/config.go b/vendor/github.com/cortexproject/cortex/pkg/util/config.go new file mode 100644 index 00000000000..e1032d0f6f1 --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/config.go @@ -0,0 +1,68 @@ +package util + +import ( + "fmt" + "reflect" +) + +// DiffConfig utility function that returns the diff between two config map objects +func DiffConfig(defaultConfig, actualConfig map[interface{}]interface{}) (map[interface{}]interface{}, error) { + output := make(map[interface{}]interface{}) + + for key, value := range actualConfig { + + defaultValue, ok := defaultConfig[key] + if !ok { + output[key] = value + continue + } + + switch v := value.(type) { + case int: + defaultV, ok := defaultValue.(int) + if !ok || defaultV != v { + output[key] = v + } + case string: + defaultV, ok := defaultValue.(string) + if !ok || defaultV != v { + output[key] = v + } + case bool: + defaultV, ok := defaultValue.(bool) + if !ok || defaultV != v { + output[key] = v + } + case []interface{}: + defaultV, ok := defaultValue.([]interface{}) + if !ok || !reflect.DeepEqual(defaultV, v) { + output[key] = v + } + case float64: + defaultV, ok := defaultValue.(float64) + if !ok || !reflect.DeepEqual(defaultV, v) { + output[key] = v + } + case nil: + if defaultValue != nil { + output[key] = v + } + case map[interface{}]interface{}: + defaultV, ok := defaultValue.(map[interface{}]interface{}) + if !ok { + output[key] = value + } + diff, err := DiffConfig(defaultV, v) + if err != nil { + return nil, err + } + if len(diff) > 0 { + output[key] = diff + } + default: + return nil, fmt.Errorf("unsupported type %T", v) + } + } + + return output, nil +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go 
b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go index 414dce2954f..3b994892ef3 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/flagext/deprecated.go @@ -7,7 +7,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // DeprecatedFlagsUsed is the metric that counts deprecated flags set. @@ -27,7 +27,7 @@ func (deprecatedFlag) String() string { } func (d deprecatedFlag) Set(string) error { - level.Warn(util.Logger).Log("msg", "flag disabled", "flag", d.name) + level.Warn(util_log.Logger).Log("msg", "flag disabled", "flag", d.name) DeprecatedFlagsUsed.Inc() return nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go index 522011fb01c..e876804c4b4 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/grpcclient/grpcclient.go @@ -5,7 +5,6 @@ import ( "time" "github.com/go-kit/kit/log" - "github.com/go-kit/kit/log/level" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" "github.com/pkg/errors" "google.golang.org/grpc" @@ -13,22 +12,23 @@ import ( "google.golang.org/grpc/keepalive" "github.com/cortexproject/cortex/pkg/util" - "github.com/cortexproject/cortex/pkg/util/flagext" "github.com/cortexproject/cortex/pkg/util/grpc/encoding/snappy" "github.com/cortexproject/cortex/pkg/util/tls" ) // Config for a gRPC client. type Config struct { - MaxRecvMsgSize int `yaml:"max_recv_msg_size"` - MaxSendMsgSize int `yaml:"max_send_msg_size"` - UseGzipCompression bool `yaml:"use_gzip_compression"` // TODO: Remove this deprecated option in v1.6.0. - GRPCCompression string `yaml:"grpc_compression"` - RateLimit float64 `yaml:"rate_limit"` - RateLimitBurst int `yaml:"rate_limit_burst"` + MaxRecvMsgSize int `yaml:"max_recv_msg_size"` + MaxSendMsgSize int `yaml:"max_send_msg_size"` + GRPCCompression string `yaml:"grpc_compression"` + RateLimit float64 `yaml:"rate_limit"` + RateLimitBurst int `yaml:"rate_limit_burst"` BackoffOnRatelimits bool `yaml:"backoff_on_ratelimits"` BackoffConfig util.BackoffConfig `yaml:"backoff_config"` + + TLSEnabled bool `yaml:"tls_enabled"` + TLS tls.ClientConfig `yaml:",inline"` } // RegisterFlags registers flags. @@ -40,20 +40,18 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { func (cfg *Config) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { f.IntVar(&cfg.MaxRecvMsgSize, prefix+".grpc-max-recv-msg-size", 100<<20, "gRPC client max receive message size (bytes).") f.IntVar(&cfg.MaxSendMsgSize, prefix+".grpc-max-send-msg-size", 16<<20, "gRPC client max send message size (bytes).") - f.BoolVar(&cfg.UseGzipCompression, prefix+".grpc-use-gzip-compression", false, "Deprecated: Use gzip compression when sending messages. If true, overrides grpc-compression flag.") f.StringVar(&cfg.GRPCCompression, prefix+".grpc-compression", "", "Use compression when sending messages. 
Supported values are: 'gzip', 'snappy' and '' (disable compression)") f.Float64Var(&cfg.RateLimit, prefix+".grpc-client-rate-limit", 0., "Rate limit for gRPC client; 0 means disabled.") f.IntVar(&cfg.RateLimitBurst, prefix+".grpc-client-rate-limit-burst", 0, "Rate limit burst for gRPC client.") f.BoolVar(&cfg.BackoffOnRatelimits, prefix+".backoff-on-ratelimits", false, "Enable backoff and retry when we hit ratelimits.") + f.BoolVar(&cfg.TLSEnabled, prefix+".tls-enabled", cfg.TLSEnabled, "Enable TLS in the GRPC client. This flag needs to be enabled when any other TLS flag is set. If set to false, insecure connection to gRPC server will be used.") cfg.BackoffConfig.RegisterFlags(prefix, f) + + cfg.TLS.RegisterFlagsWithPrefix(prefix, f) } func (cfg *Config) Validate(log log.Logger) error { - if cfg.UseGzipCompression { - flagext.DeprecatedFlagsUsed.Inc() - level.Warn(log).Log("msg", "running with DEPRECATED option use_gzip_compression, use grpc_compression instead.") - } switch cfg.GRPCCompression { case gzip.Name, snappy.Name, "": // valid @@ -68,18 +66,21 @@ func (cfg *Config) CallOptions() []grpc.CallOption { var opts []grpc.CallOption opts = append(opts, grpc.MaxCallRecvMsgSize(cfg.MaxRecvMsgSize)) opts = append(opts, grpc.MaxCallSendMsgSize(cfg.MaxSendMsgSize)) - compression := cfg.GRPCCompression - if cfg.UseGzipCompression { - compression = gzip.Name - } - if compression != "" { - opts = append(opts, grpc.UseCompressor(compression)) + if cfg.GRPCCompression != "" { + opts = append(opts, grpc.UseCompressor(cfg.GRPCCompression)) } return opts } // DialOption returns the config as a grpc.DialOptions. -func (cfg *Config) DialOption(unaryClientInterceptors []grpc.UnaryClientInterceptor, streamClientInterceptors []grpc.StreamClientInterceptor) []grpc.DialOption { +func (cfg *Config) DialOption(unaryClientInterceptors []grpc.UnaryClientInterceptor, streamClientInterceptors []grpc.StreamClientInterceptor) ([]grpc.DialOption, error) { + var opts []grpc.DialOption + tlsOpts, err := cfg.TLS.GetGRPCDialOptions(cfg.TLSEnabled) + if err != nil { + return nil, err + } + opts = append(opts, tlsOpts...) + if cfg.BackoffOnRatelimits { unaryClientInterceptors = append([]grpc.UnaryClientInterceptor{NewBackoffRetry(cfg.BackoffConfig)}, unaryClientInterceptors...) } @@ -88,7 +89,8 @@ func (cfg *Config) DialOption(unaryClientInterceptors []grpc.UnaryClientIntercep unaryClientInterceptors = append([]grpc.UnaryClientInterceptor{NewRateLimiter(cfg)}, unaryClientInterceptors...) } - return []grpc.DialOption{ + return append( + opts, grpc.WithDefaultCallOptions(cfg.CallOptions()...), grpc.WithUnaryInterceptor(grpc_middleware.ChainUnaryClient(unaryClientInterceptors...)), grpc.WithStreamInterceptor(grpc_middleware.ChainStreamClient(streamClientInterceptors...)), @@ -97,31 +99,5 @@ func (cfg *Config) DialOption(unaryClientInterceptors []grpc.UnaryClientIntercep Timeout: time.Second * 10, PermitWithoutStream: true, }), - } -} - -// ConfigWithTLS is the config for a grpc client with tls -type ConfigWithTLS struct { - GRPC Config `yaml:",inline"` - TLS tls.ClientConfig `yaml:",inline"` -} - -// RegisterFlagsWithPrefix registers flags with prefix. 
-func (cfg *ConfigWithTLS) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { - cfg.GRPC.RegisterFlagsWithPrefix(prefix, f) - cfg.TLS.RegisterFlagsWithPrefix(prefix, f) -} - -func (cfg *ConfigWithTLS) Validate(log log.Logger) error { - return cfg.GRPC.Validate(log) -} - -// DialOption returns the config as a grpc.DialOptions -func (cfg *ConfigWithTLS) DialOption(unaryClientInterceptors []grpc.UnaryClientInterceptor, streamClientInterceptors []grpc.StreamClientInterceptor) ([]grpc.DialOption, error) { - opts, err := cfg.TLS.GetGRPCDialOptions() - if err != nil { - return nil, err - } - - return append(opts, cfg.GRPC.DialOption(unaryClientInterceptors, streamClientInterceptors)...), nil + ), nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/http.go b/vendor/github.com/cortexproject/cortex/pkg/util/http.go index 369078223a4..f06363e537f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/http.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/http.go @@ -4,31 +4,78 @@ import ( "bytes" "context" "encoding/json" + "flag" "fmt" "html/template" "io" "net/http" "strings" - "github.com/blang/semver" "github.com/gogo/protobuf/proto" "github.com/golang/snappy" "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" + "gopkg.in/yaml.v2" ) +const messageSizeLargerErrFmt = "received message larger than max (%d vs %d)" + +// BasicAuth configures basic authentication for HTTP clients. +type BasicAuth struct { + Username string `yaml:"basic_auth_username"` + Password string `yaml:"basic_auth_password"` +} + +func (b *BasicAuth) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) { + f.StringVar(&b.Username, prefix+"basic-auth-username", "", "HTTP Basic authentication username. It overrides the username set in the URL (if any).") + f.StringVar(&b.Password, prefix+"basic-auth-password", "", "HTTP Basic authentication password. It overrides the password set in the URL (if any).") +} + +// IsEnabled returns false if basic authentication isn't enabled. +func (b BasicAuth) IsEnabled() bool { + return b.Username != "" || b.Password != "" +} + // WriteJSONResponse writes some JSON as a HTTP response. func WriteJSONResponse(w http.ResponseWriter, v interface{}) { + w.Header().Set("Content-Type", "application/json") + data, err := json.Marshal(v) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } - if _, err = w.Write(data); err != nil { + + // We ignore errors here, because we cannot do anything about them. + // Write will trigger sending Status code, so we cannot send a different status code afterwards. + // Also this isn't internal error, but error communicating with client. + _, _ = w.Write(data) +} + +// WriteYAMLResponse writes some YAML as a HTTP response. +func WriteYAMLResponse(w http.ResponseWriter, v interface{}) { + // There is not standardised content-type for YAML, text/plain ensures the + // YAML is displayed in the browser instead of offered as a download + w.Header().Set("Content-Type", "text/plain; charset=utf-8") + + data, err := yaml.Marshal(v) + if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } - w.Header().Set("Content-Type", "application/json") + + // We ignore errors here, because we cannot do anything about them. + // Write will trigger sending Status code, so we cannot send a different status code afterwards. + // Also this isn't internal error, but error communicating with client. 
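Note why the Content-Type header now moves above the Write call in these helpers: http.ResponseWriter commits the headers on the first Write (or WriteHeader), so anything set afterwards is silently dropped. A minimal handler demonstrating the ordering (not code from this patch):

package main

import (
	"encoding/json"
	"net/http"
)

// handler sets all headers before the first Write; once Write runs, the
// status code and headers are committed and write errors are client-side.
func handler(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	data, err := json.Marshal(map[string]string{"status": "ok"})
	if err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	_, _ = w.Write(data) // headers committed here; nothing useful to do on error
}

func main() {
	http.HandleFunc("/status", handler)
	_ = http.ListenAndServe("127.0.0.1:8080", nil)
}
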
+ _, _ = w.Write(data) +} + +// Sends message as text/plain response with 200 status code. +func WriteTextResponse(w http.ResponseWriter, message string) { + w.Header().Set("Content-Type", "text/plain") + + // Ignore inactionable errors. + _, _ = w.Write([]byte(message)) } // RenderHTTPResponse either responds with json or a rendered html page using the passed in template @@ -52,71 +99,22 @@ type CompressionType int // Values for CompressionType const ( NoCompression CompressionType = iota - FramedSnappy RawSnappy ) -var rawSnappyFromVersion = semver.MustParse("0.1.0") - -// CompressionTypeFor a given version of the Prometheus remote storage protocol. -// See https://github.com/prometheus/prometheus/issues/2692. -func CompressionTypeFor(version string) CompressionType { - ver, err := semver.Make(version) - if err != nil { - return FramedSnappy - } - - if ver.GTE(rawSnappyFromVersion) { - return RawSnappy - } - return FramedSnappy -} - // ParseProtoReader parses a compressed proto from an io.Reader. func ParseProtoReader(ctx context.Context, reader io.Reader, expectedSize, maxSize int, req proto.Message, compression CompressionType) error { - var body []byte - var err error sp := opentracing.SpanFromContext(ctx) if sp != nil { sp.LogFields(otlog.String("event", "util.ParseProtoRequest[start reading]")) } - var buf bytes.Buffer - if expectedSize > 0 { - if expectedSize > maxSize { - return fmt.Errorf("message expected size larger than max (%d vs %d)", expectedSize, maxSize) - } - buf.Grow(expectedSize + bytes.MinRead) // extra space guarantees no reallocation - } - switch compression { - case NoCompression: - // Read from LimitReader with limit max+1. So if the underlying - // reader is over limit, the result will be bigger than max. - _, err = buf.ReadFrom(io.LimitReader(reader, int64(maxSize)+1)) - body = buf.Bytes() - case FramedSnappy: - _, err = buf.ReadFrom(io.LimitReader(snappy.NewReader(reader), int64(maxSize)+1)) - body = buf.Bytes() - case RawSnappy: - _, err = buf.ReadFrom(reader) - body = buf.Bytes() - if sp != nil { - sp.LogFields(otlog.String("event", "util.ParseProtoRequest[decompress]"), - otlog.Int("size", len(body))) - } - if err == nil && len(body) <= maxSize { - body, err = snappy.Decode(nil, body) - } - } + body, err := decompressRequest(reader, expectedSize, maxSize, compression, sp) if err != nil { return err } - if len(body) > maxSize { - return fmt.Errorf("received message larger than max (%d vs %d)", len(body), maxSize) - } if sp != nil { - sp.LogFields(otlog.String("event", "util.ParseProtoRequest[unmarshal]"), - otlog.Int("size", len(body))) + sp.LogFields(otlog.String("event", "util.ParseProtoRequest[unmarshal]"), otlog.Int("size", len(body))) } // We re-implement proto.Unmarshal here as it calls XXX_Unmarshal first, @@ -134,6 +132,89 @@ func ParseProtoReader(ctx context.Context, reader io.Reader, expectedSize, maxSi return nil } +func decompressRequest(reader io.Reader, expectedSize, maxSize int, compression CompressionType, sp opentracing.Span) (body []byte, err error) { + defer func() { + if err != nil && len(body) > maxSize { + err = fmt.Errorf(messageSizeLargerErrFmt, len(body), maxSize) + } + }() + if expectedSize > maxSize { + return nil, fmt.Errorf(messageSizeLargerErrFmt, expectedSize, maxSize) + } + buffer, ok := tryBufferFromReader(reader) + if ok { + body, err = decompressFromBuffer(buffer, maxSize, compression, sp) + return + } + body, err = decompressFromReader(reader, expectedSize, maxSize, compression, sp) + return +} + +func 
decompressFromReader(reader io.Reader, expectedSize, maxSize int, compression CompressionType, sp opentracing.Span) ([]byte, error) { + var ( + buf bytes.Buffer + body []byte + err error + ) + if expectedSize > 0 { + buf.Grow(expectedSize + bytes.MinRead) // extra space guarantees no reallocation + } + // Read from LimitReader with limit max+1. So if the underlying + // reader is over limit, the result will be bigger than max. + reader = io.LimitReader(reader, int64(maxSize)+1) + switch compression { + case NoCompression: + _, err = buf.ReadFrom(reader) + body = buf.Bytes() + case RawSnappy: + _, err = buf.ReadFrom(reader) + if err != nil { + return nil, err + } + body, err = decompressFromBuffer(&buf, maxSize, RawSnappy, sp) + } + return body, err +} + +func decompressFromBuffer(buffer *bytes.Buffer, maxSize int, compression CompressionType, sp opentracing.Span) ([]byte, error) { + if len(buffer.Bytes()) > maxSize { + return nil, fmt.Errorf(messageSizeLargerErrFmt, len(buffer.Bytes()), maxSize) + } + switch compression { + case NoCompression: + return buffer.Bytes(), nil + case RawSnappy: + if sp != nil { + sp.LogFields(otlog.String("event", "util.ParseProtoRequest[decompress]"), + otlog.Int("size", len(buffer.Bytes()))) + } + size, err := snappy.DecodedLen(buffer.Bytes()) + if err != nil { + return nil, err + } + if size > maxSize { + return nil, fmt.Errorf(messageSizeLargerErrFmt, size, maxSize) + } + body, err := snappy.Decode(nil, buffer.Bytes()) + if err != nil { + return nil, err + } + return body, nil + } + return nil, nil +} + +// tryBufferFromReader attempts to cast the reader to a `*bytes.Buffer` this is possible when using httpgrpc. +// If it fails it will return nil and false. +func tryBufferFromReader(reader io.Reader) (*bytes.Buffer, bool) { + if bufReader, ok := reader.(interface { + BytesBuffer() *bytes.Buffer + }); ok && bufReader != nil { + return bufReader.BytesBuffer(), true + } + return nil, false +} + // SerializeProtoResponse serializes a protobuf response into an HTTP response. 
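tryBufferFromReader above is an optional-interface upgrade: probe the reader for a richer capability, use it to avoid a copy, and fall back gracefully otherwise. A self-contained sketch of the same pattern (bytesBufferer is this sketch's name for the anonymous interface the real code asserts):

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// bytesBufferer models the optional capability: a reader that can hand
// over its underlying buffer directly (httpgrpc bodies can, plain readers can't).
type bytesBufferer interface {
	BytesBuffer() *bytes.Buffer
}

// bufferOrCopy returns the underlying buffer when available, otherwise it
// falls back to reading the stream into a fresh buffer.
func bufferOrCopy(r io.Reader) (*bytes.Buffer, error) {
	if b, ok := r.(bytesBufferer); ok {
		return b.BytesBuffer(), nil
	}
	var buf bytes.Buffer
	_, err := buf.ReadFrom(r)
	return &buf, err
}

func main() {
	buf, _ := bufferOrCopy(strings.NewReader("payload"))
	fmt.Println(buf.String()) // payload
}
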
func SerializeProtoResponse(w http.ResponseWriter, resp proto.Message, compression CompressionType) error { data, err := proto.Marshal(resp) @@ -144,14 +225,6 @@ func SerializeProtoResponse(w http.ResponseWriter, resp proto.Message, compressi switch compression { case NoCompression: - case FramedSnappy: - buf := bytes.Buffer{} - writer := snappy.NewBufferedWriter(&buf) - if _, err := writer.Write(data); err != nil { - return err - } - writer.Close() - data = buf.Bytes() case RawSnappy: data = snappy.Encode(nil, data) } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/experimental.go b/vendor/github.com/cortexproject/cortex/pkg/util/log/experimental.go similarity index 97% rename from vendor/github.com/cortexproject/cortex/pkg/util/experimental.go rename to vendor/github.com/cortexproject/cortex/pkg/util/log/experimental.go index 6cc163e9deb..3241af692f8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/experimental.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/log/experimental.go @@ -1,4 +1,4 @@ -package util +package log import ( "github.com/go-kit/kit/log/level" diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/log.go b/vendor/github.com/cortexproject/cortex/pkg/util/log/log.go similarity index 65% rename from vendor/github.com/cortexproject/cortex/pkg/util/log.go rename to vendor/github.com/cortexproject/cortex/pkg/util/log/log.go index f04e8065fe9..92ea3f697d1 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/log.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/log/log.go @@ -1,24 +1,22 @@ -package util +package log import ( - "context" "fmt" "os" "github.com/go-kit/kit/log" + kitlog "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/weaveworks/common/logging" - "github.com/weaveworks/common/middleware" "github.com/weaveworks/common/server" - - "github.com/cortexproject/cortex/pkg/tenant" ) var ( // Logger is a shared go-kit logger. // TODO: Change all components to take a non-global logger via their constructors. - Logger = log.NewNopLogger() + // Prefer accepting a non-global logger as an argument. + Logger = kitlog.NewNopLogger() logMessages = prometheus.NewCounterVec(prometheus.CounterOpts{ Name: "log_messages_total", @@ -37,7 +35,7 @@ func init() { prometheus.MustRegister(logMessages) } -// InitLogger initialises the global gokit logger (util.Logger) and overrides the +// InitLogger initialises the global gokit logger (util_log.Logger) and overrides the // default logger for the server. func InitLogger(cfg *server.Config) { l, err := NewPrometheusLogger(cfg.LogLevel, cfg.LogFormat) @@ -45,7 +43,7 @@ func InitLogger(cfg *server.Config) { panic(err) } - // when use util.Logger, skip 3 stack frames. + // when use util_log.Logger, skip 3 stack frames. Logger = log.With(l, "caller", log.Caller(3)) // cfg.Log wraps log function, skip 4 stack frames to get caller information. @@ -97,48 +95,6 @@ func (pl *PrometheusLogger) Log(kv ...interface{}) error { return nil } -// WithContext returns a Logger that has information about the current user in -// its details. -// -// e.g. -// log := util.WithContext(ctx) -// log.Errorf("Could not chunk chunks: %v", err) -func WithContext(ctx context.Context, l log.Logger) log.Logger { - // Weaveworks uses "orgs" and "orgID" to represent Cortex users, - // even though the code-base generally uses `userID` to refer to the same thing. 
- userID, err := tenant.TenantID(ctx) - if err == nil { - l = WithUserID(userID, l) - } - - traceID, ok := middleware.ExtractTraceID(ctx) - if !ok { - return l - } - - return WithTraceID(traceID, l) -} - -// WithUserID returns a Logger that has information about the current user in -// its details. -func WithUserID(userID string, l log.Logger) log.Logger { - // See note in WithContext. - return log.With(l, "org_id", userID) -} - -// WithTraceID returns a Logger that has information about the traceID in -// its details. -func WithTraceID(traceID string, l log.Logger) log.Logger { - // See note in WithContext. - return log.With(l, "traceID", traceID) -} - -// WithSourceIPs returns a Logger that has information about the source IPs in -// its details. -func WithSourceIPs(sourceIPs string, l log.Logger) log.Logger { - return log.With(l, "sourceIPs", sourceIPs) -} - // CheckFatal prints an error and exits with error code 1 if err is non-nil func CheckFatal(location string, err error) { if err != nil { diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/log/wrappers.go b/vendor/github.com/cortexproject/cortex/pkg/util/log/wrappers.go new file mode 100644 index 00000000000..9c37472728f --- /dev/null +++ b/vendor/github.com/cortexproject/cortex/pkg/util/log/wrappers.go @@ -0,0 +1,53 @@ +package log + +import ( + "context" + + "github.com/go-kit/kit/log" + kitlog "github.com/go-kit/kit/log" + "github.com/weaveworks/common/middleware" + + "github.com/cortexproject/cortex/pkg/tenant" +) + +// WithUserID returns a Logger that has information about the current user in +// its details. +func WithUserID(userID string, l kitlog.Logger) kitlog.Logger { + // See note in WithContext. + return kitlog.With(l, "org_id", userID) +} + +// WithTraceID returns a Logger that has information about the traceID in +// its details. +func WithTraceID(traceID string, l kitlog.Logger) kitlog.Logger { + // See note in WithContext. + return kitlog.With(l, "traceID", traceID) +} + +// WithContext returns a Logger that has information about the current user in +// its details. +// +// e.g. +// log := util.WithContext(ctx) +// log.Errorf("Could not chunk chunks: %v", err) +func WithContext(ctx context.Context, l kitlog.Logger) kitlog.Logger { + // Weaveworks uses "orgs" and "orgID" to represent Cortex users, + // even though the code-base generally uses `userID` to refer to the same thing. + userID, err := tenant.TenantID(ctx) + if err == nil { + l = WithUserID(userID, l) + } + + traceID, ok := middleware.ExtractTraceID(ctx) + if !ok { + return l + } + + return WithTraceID(traceID, l) +} + +// WithSourceIPs returns a Logger that has information about the source IPs in +// its details. 
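These wrappers all follow the same design: never mutate a logger, always return a new wrapped one, so fields accumulate per request and the base logger stays safe to share. A toy key-value logger showing why the chaining composes (illustrative only, not go-kit's implementation):

package main

import "fmt"

// kvLogger is a minimal stand-in for go-kit's log.Logger.
type kvLogger struct{ kvs []interface{} }

func (l kvLogger) log(kvs ...interface{}) { fmt.Println(append(l.kvs, kvs...)...) }

// with returns a new logger carrying the extra fields; the original is untouched.
func with(l kvLogger, kvs ...interface{}) kvLogger {
	return kvLogger{kvs: append(append([]interface{}{}, l.kvs...), kvs...)}
}

func main() {
	base := kvLogger{}
	l := with(base, "org_id", "team-a")
	l = with(l, "traceID", "abc123")
	l.log("msg", "query started") // org_id team-a traceID abc123 msg query started
}
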
+func WithSourceIPs(sourceIPs string, l log.Logger) log.Logger { + return log.With(l, "sourceIPs", sourceIPs) +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/math.go b/vendor/github.com/cortexproject/cortex/pkg/util/math/math.go similarity index 97% rename from vendor/github.com/cortexproject/cortex/pkg/util/math.go rename to vendor/github.com/cortexproject/cortex/pkg/util/math/math.go index 43b3a6d6995..01e544384a7 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/math.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/math/math.go @@ -1,4 +1,4 @@ -package util +package math // Max returns the maximum of two ints func Max(a, b int) int { diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go index 172bfddfcf1..35b248e4af9 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/metrics_helper.go @@ -10,6 +10,10 @@ import ( "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" dto "github.com/prometheus/client_model/go" + "github.com/prometheus/prometheus/pkg/labels" + tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" + + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // Data for single value (counter/gauge) with labels. @@ -582,7 +586,7 @@ func (r *UserRegistries) RemoveUserRegistry(user string, hard bool) { func (r *UserRegistries) softRemoveUserRegistry(ur *UserRegistry) bool { last, err := ur.reg.Gather() if err != nil { - level.Warn(Logger).Log("msg", "failed to gather metrics from registry", "user", ur.user, "err", err) + level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from registry", "user", ur.user, "err", err) return false } @@ -604,7 +608,7 @@ func (r *UserRegistries) softRemoveUserRegistry(ur *UserRegistry) bool { ur.lastGather, err = NewMetricFamilyMap(last) if err != nil { - level.Warn(Logger).Log("msg", "failed to gather metrics from registry", "user", ur.user, "err", err) + level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from registry", "user", ur.user, "err", err) return false } @@ -655,9 +659,107 @@ func (r *UserRegistries) BuildMetricFamiliesPerUser() MetricFamiliesPerUser { } if err != nil { - level.Warn(Logger).Log("msg", "failed to gather metrics from registry", "user", entry.user, "err", err) + level.Warn(util_log.Logger).Log("msg", "failed to gather metrics from registry", "user", entry.user, "err", err) continue } } return data } + +// FromLabelPairsToLabels converts dto.LabelPair into labels.Labels. +func FromLabelPairsToLabels(pairs []*dto.LabelPair) labels.Labels { + builder := labels.NewBuilder(nil) + for _, pair := range pairs { + builder.Set(pair.GetName(), pair.GetValue()) + } + return builder.Labels() +} + +// GetSumOfHistogramSampleCount returns the sum of samples count of histograms matching the provided metric name +// and optional label matchers. Returns 0 if no metric matches. 
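A usage sketch for the helper documented above: the families argument is whatever a Prometheus registry's Gather() returns, and for an unlabelled histogram the matcher loop reduces to summing sample counts, as below (assumes the client_golang and client_model modules; this is illustrative code, not part of this patch):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	reg := prometheus.NewRegistry()
	h := prometheus.NewHistogram(prometheus.HistogramOpts{Name: "req_duration_seconds", Help: "Request duration in seconds."})
	reg.MustRegister(h)
	h.Observe(0.2)
	h.Observe(1.5)

	families, _ := reg.Gather() // []*dto.MetricFamily, the input GetSumOfHistogramSampleCount expects
	var sum uint64
	for _, mf := range families {
		if mf.GetName() != "req_duration_seconds" || mf.GetType() != dto.MetricType_HISTOGRAM {
			continue
		}
		for _, m := range mf.GetMetric() {
			sum += m.GetHistogram().GetSampleCount()
		}
	}
	fmt.Println(sum) // 2
}
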
+func GetSumOfHistogramSampleCount(families []*dto.MetricFamily, metricName string, matchers labels.Selector) uint64 {
+	sum := uint64(0)
+
+	for _, metric := range families {
+		if metric.GetName() != metricName {
+			continue
+		}
+
+		if metric.GetType() != dto.MetricType_HISTOGRAM {
+			continue
+		}
+
+		for _, series := range metric.GetMetric() {
+			if !matchers.Matches(FromLabelPairsToLabels(series.GetLabel())) {
+				continue
+			}
+
+			histogram := series.GetHistogram()
+			sum += histogram.GetSampleCount()
+		}
+	}
+
+	return sum
+}
+
+// GetLabels returns the list of label combinations used by this collector at the time of the call.
+// This can be used to find and delete unused metrics.
+func GetLabels(c prometheus.Collector, filter map[string]string) ([]labels.Labels, error) {
+	ch := make(chan prometheus.Metric, 16)
+
+	go func() {
+		defer close(ch)
+		c.Collect(ch)
+	}()
+
+	errs := tsdb_errors.NewMulti()
+	var result []labels.Labels
+	dtoMetric := &dto.Metric{}
+
+nextMetric:
+	for m := range ch {
+		err := m.Write(dtoMetric)
+		if err != nil {
+			errs.Add(err)
+			// We cannot return here, to avoid blocking the goroutine calling c.Collect().
+			continue
+		}
+
+		lbls := labels.NewBuilder(nil)
+		for _, lp := range dtoMetric.Label {
+			n := lp.GetName()
+			v := lp.GetValue()
+
+			filterValue, ok := filter[n]
+			if ok && filterValue != v {
+				continue nextMetric
+			}
+
+			lbls.Set(lp.GetName(), lp.GetValue())
+		}
+		result = append(result, lbls.Labels())
+	}
+
+	return result, errs.Err()
+}
+
+// DeleteMatchingLabels removes metrics with labels matching the filter.
+func DeleteMatchingLabels(c CollectorVec, filter map[string]string) error {
+	lbls, err := GetLabels(c, filter)
+	if err != nil {
+		return err
+	}
+
+	for _, ls := range lbls {
+		c.Delete(ls.Map())
+	}
+
+	return nil
+}
+
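A hypothetical caller of DeleteMatchingLabels from inside this package, e.g. cleaning up per-user series after ActiveUsers.PurgeInactiveUsers reports a user gone (purgeUserSeries and the "user" label name are illustrative, not part of this patch):

package util

import "github.com/prometheus/client_golang/prometheus"

// purgeUserSeries drops every series of a vector carrying the given user label.
// *prometheus.CounterVec satisfies CollectorVec: it is a prometheus.Collector
// and provides Delete(prometheus.Labels) bool via the embedded MetricVec.
func purgeUserSeries(vec *prometheus.CounterVec, userID string) error {
	return DeleteMatchingLabels(vec, map[string]string{"user": userID})
}

+// CollectorVec is a collector that can delete metrics by labels.
+// Implemented by *prometheus.MetricVec (used by CounterVec, GaugeVec, SummaryVec, and HistogramVec).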
+type CollectorVec interface { + prometheus.Collector + Delete(labels prometheus.Labels) bool +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/module_service.go b/vendor/github.com/cortexproject/cortex/pkg/util/module_service.go index be12157ac26..0d4fb43d1f2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/module_service.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/module_service.go @@ -7,6 +7,7 @@ import ( "github.com/go-kit/kit/log/level" "github.com/pkg/errors" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -45,7 +46,7 @@ func (w *moduleService) start(serviceContext context.Context) error { continue } - level.Debug(Logger).Log("msg", "module waiting for initialization", "module", w.name, "waiting_for", m) + level.Debug(util_log.Logger).Log("msg", "module waiting for initialization", "module", w.name, "waiting_for", m) err := s.AwaitRunning(serviceContext) if err != nil { @@ -55,7 +56,7 @@ func (w *moduleService) start(serviceContext context.Context) error { // we don't want to let this service to stop until all dependant services are stopped, // so we use independent context here - level.Info(Logger).Log("msg", "initialising", "module", w.name) + level.Info(util_log.Logger).Log("msg", "initialising", "module", w.name) err := w.service.StartAsync(context.Background()) if err != nil { return errors.Wrapf(err, "error starting module: %s", w.name) @@ -77,7 +78,7 @@ func (w *moduleService) stop(_ error) error { // Only wait for other modules, if underlying service is still running. w.waitForModulesToStop() - level.Debug(Logger).Log("msg", "stopping", "module", w.name) + level.Debug(util_log.Logger).Log("msg", "stopping", "module", w.name) err = services.StopAndAwaitTerminated(context.Background(), w.service) } else { @@ -85,9 +86,9 @@ func (w *moduleService) stop(_ error) error { } if err != nil && err != ErrStopProcess { - level.Warn(Logger).Log("msg", "module failed with error", "module", w.name, "err", err) + level.Warn(util_log.Logger).Log("msg", "module failed with error", "module", w.name, "err", err) } else { - level.Info(Logger).Log("msg", "module stopped", "module", w.name) + level.Info(util_log.Logger).Log("msg", "module stopped", "module", w.name) } return err } @@ -100,7 +101,7 @@ func (w *moduleService) waitForModulesToStop() { continue } - level.Debug(Logger).Log("msg", "module waiting for", "module", w.name, "waiting_for", n) + level.Debug(util_log.Logger).Log("msg", "module waiting for", "module", w.name, "waiting_for", n) // Passed context isn't canceled, so we can only get error here, if service // fails. But we don't care *how* service stops, as long as it is done. _ = s.AwaitTerminated(context.Background()) diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/net.go b/vendor/github.com/cortexproject/cortex/pkg/util/net.go index e0fa12e6ffa..f4cd184870f 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/net.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/net.go @@ -5,6 +5,8 @@ import ( "net" "github.com/go-kit/kit/log/level" + + util_log "github.com/cortexproject/cortex/pkg/util/log" ) // GetFirstAddressOf returns the first IPv4 address of the supplied interface names. 
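For context, the lookup this function performs (the hunk below only touches its logging) roughly amounts to the following standalone sketch; the real implementation may differ in details such as address-type handling:

package main

import (
	"errors"
	"fmt"
	"net"
)

// firstIPv4Of walks the named interfaces and returns the first IPv4 address
// found, logging and continuing (here: just continuing) on per-interface errors.
func firstIPv4Of(names []string) (string, error) {
	for _, name := range names {
		inf, err := net.InterfaceByName(name)
		if err != nil {
			continue // the real code logs a warning and continues
		}
		addrs, err := inf.Addrs()
		if err != nil {
			continue
		}
		for _, addr := range addrs {
			if ipnet, ok := addr.(*net.IPNet); ok && ipnet.IP.To4() != nil {
				return ipnet.IP.String(), nil
			}
		}
	}
	return "", errors.New("no IPv4 address found")
}

func main() {
	addr, err := firstIPv4Of([]string{"eth0", "en0", "lo"})
	fmt.Println(addr, err)
}
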
@@ -12,17 +14,17 @@ func GetFirstAddressOf(names []string) (string, error) { for _, name := range names { inf, err := net.InterfaceByName(name) if err != nil { - level.Warn(Logger).Log("msg", "error getting interface", "inf", name, "err", err) + level.Warn(util_log.Logger).Log("msg", "error getting interface", "inf", name, "err", err) continue } addrs, err := inf.Addrs() if err != nil { - level.Warn(Logger).Log("msg", "error getting addresses for interface", "inf", name, "err", err) + level.Warn(util_log.Logger).Log("msg", "error getting addresses for interface", "inf", name, "err", err) continue } if len(addrs) <= 0 { - level.Warn(Logger).Log("msg", "no addresses found for interface", "inf", name, "err", err) + level.Warn(util_log.Logger).Log("msg", "no addresses found for interface", "inf", name, "err", err) continue } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/push/push.go b/vendor/github.com/cortexproject/cortex/pkg/util/push/push.go index 78c2c825981..ed7829b19e6 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/push/push.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/push/push.go @@ -8,31 +8,35 @@ import ( "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/middleware" - "github.com/cortexproject/cortex/pkg/distributor" "github.com/cortexproject/cortex/pkg/ingester/client" "github.com/cortexproject/cortex/pkg/util" + "github.com/cortexproject/cortex/pkg/util/log" ) +// Func defines the type of the push. It is similar to http.HandlerFunc. +type Func func(context.Context, *client.WriteRequest) (*client.WriteResponse, error) + // Handler is a http.Handler which accepts WriteRequests. -func Handler(cfg distributor.Config, sourceIPs *middleware.SourceIPExtractor, push func(context.Context, *client.WriteRequest) (*client.WriteResponse, error)) http.Handler { +func Handler(maxRecvMsgSize int, sourceIPs *middleware.SourceIPExtractor, push Func) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { ctx := r.Context() - logger := util.WithContext(ctx, util.Logger) + logger := log.WithContext(ctx, log.Logger) if sourceIPs != nil { source := sourceIPs.Get(r) if source != "" { ctx = util.AddSourceIPsToOutgoingContext(ctx, source) - logger = util.WithSourceIPs(source, logger) + logger = log.WithSourceIPs(source, logger) } } - compressionType := util.CompressionTypeFor(r.Header.Get("X-Prometheus-Remote-Write-Version")) var req client.PreallocWriteRequest - err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), cfg.MaxRecvMsgSize, &req, compressionType) + err := util.ParseProtoReader(ctx, r.Body, int(r.ContentLength), maxRecvMsgSize, &req, util.RawSnappy) if err != nil { level.Error(logger).Log("err", err.Error()) http.Error(w, err.Error(), http.StatusBadRequest) return } + + req.SkipLabelNameValidation = false if req.Source == 0 { req.Source = client.API } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go b/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go index ca4f34c974e..6447508cde1 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/runtimeconfig/manager.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "crypto/sha256" - "errors" "flag" "fmt" "io" @@ -13,10 +12,11 @@ import ( "time" "github.com/go-kit/kit/log/level" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - 
"github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" "github.com/cortexproject/cortex/pkg/util/services" ) @@ -74,18 +74,16 @@ func NewRuntimeConfigManager(cfg ManagerConfig, registerer prometheus.Registerer }, []string{"sha256"}), } - mgr.Service = services.NewBasicService(mgr.start, mgr.loop, mgr.stop) + mgr.Service = services.NewBasicService(mgr.starting, mgr.loop, mgr.stopping) return &mgr, nil } -func (om *Manager) start(_ context.Context) error { - if om.cfg.LoadPath != "" { - if err := om.loadConfig(); err != nil { - // Log but don't stop on error - we don't want to halt all ingesters because of a typo - level.Error(util.Logger).Log("msg", "failed to load config", "err", err) - } +func (om *Manager) starting(_ context.Context) error { + if om.cfg.LoadPath == "" { + return nil } - return nil + + return errors.Wrap(om.loadConfig(), "failed to load runtime config") } // CreateListenerChannel creates new channel that can be used to receive new config values. @@ -120,7 +118,7 @@ func (om *Manager) CloseListenerChannel(listener <-chan interface{}) { func (om *Manager) loop(ctx context.Context) error { if om.cfg.LoadPath == "" { - level.Info(util.Logger).Log("msg", "runtime config disabled: file not specified") + level.Info(util_log.Logger).Log("msg", "runtime config disabled: file not specified") <-ctx.Done() return nil } @@ -134,7 +132,7 @@ func (om *Manager) loop(ctx context.Context) error { err := om.loadConfig() if err != nil { // Log but don't stop on error - we don't want to halt all ingesters because of a typo - level.Error(util.Logger).Log("msg", "failed to load config", "err", err) + level.Error(util_log.Logger).Log("msg", "failed to load config", "err", err) } case <-ctx.Done(): return nil @@ -148,14 +146,14 @@ func (om *Manager) loadConfig() error { buf, err := ioutil.ReadFile(om.cfg.LoadPath) if err != nil { om.configLoadSuccess.Set(0) - return err + return errors.Wrap(err, "read file") } hash := sha256.Sum256(buf) cfg, err := om.cfg.Loader(bytes.NewReader(buf)) if err != nil { om.configLoadSuccess.Set(0) - return err + return errors.Wrap(err, "load file") } om.configLoadSuccess.Set(1) @@ -190,7 +188,7 @@ func (om *Manager) callListeners(newValue interface{}) { } // Stop stops the Manager -func (om *Manager) stop(_ error) error { +func (om *Manager) stopping(_ error) error { om.listenersMtx.Lock() defer om.listenersMtx.Unlock() diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go b/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go index 056e59ae271..4b6131d45c6 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/spanlogger/spanlogger.go @@ -9,7 +9,7 @@ import ( "github.com/opentracing/opentracing-go/ext" otlog "github.com/opentracing/opentracing-go/log" - "github.com/cortexproject/cortex/pkg/util" + util_log "github.com/cortexproject/cortex/pkg/util/log" ) type loggerCtxMarker struct{} @@ -26,7 +26,7 @@ type SpanLogger struct { // New makes a new SpanLogger, where logs will be sent to the global logger. func New(ctx context.Context, method string, kvps ...interface{}) (*SpanLogger, context.Context) { - return NewWithLogger(ctx, util.Logger, method, kvps...) + return NewWithLogger(ctx, util_log.Logger, method, kvps...) 
} // NewWithLogger makes a new SpanLogger with a custom log.Logger to send logs @@ -35,7 +35,7 @@ func New(ctx context.Context, method string, kvps ...interface{}) (*SpanLogger, func NewWithLogger(ctx context.Context, l log.Logger, method string, kvps ...interface{}) (*SpanLogger, context.Context) { span, ctx := opentracing.StartSpanFromContext(ctx, method) logger := &SpanLogger{ - Logger: log.With(util.WithContext(ctx, l), "method", method), + Logger: log.With(util_log.WithContext(ctx, l), "method", method), Span: span, } if len(kvps) > 0 { @@ -51,7 +51,7 @@ func NewWithLogger(ctx context.Context, l log.Logger, method string, kvps ...int // in the context. If the context doesn't have a logger, the global logger // is used. func FromContext(ctx context.Context) *SpanLogger { - return FromContextWithFallback(ctx, util.Logger) + return FromContextWithFallback(ctx, util_log.Logger) } // FromContextWithFallback returns a span logger using the current parent span. @@ -68,7 +68,7 @@ func FromContextWithFallback(ctx context.Context, fallback log.Logger) *SpanLogg sp = defaultNoopSpan } return &SpanLogger{ - Logger: util.WithContext(ctx, logger), + Logger: util_log.WithContext(ctx, logger), Span: sp, } } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/strings.go b/vendor/github.com/cortexproject/cortex/pkg/util/strings.go index 39868e1d1cb..e4c93bc07a8 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/strings.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/strings.go @@ -10,3 +10,12 @@ func StringsContain(values []string, search string) bool { return false } + +// StringsMap returns a map where keys are input values. +func StringsMap(values []string) map[string]bool { + out := make(map[string]bool, len(values)) + for _, v := range values { + out[v] = true + } + return out +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/tls/tls.go b/vendor/github.com/cortexproject/cortex/pkg/util/tls/tls.go index 28b36941dfc..9886b208ddc 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/tls/tls.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/tls/tls.go @@ -16,6 +16,7 @@ type ClientConfig struct { CertPath string `yaml:"tls_cert_path"` KeyPath string `yaml:"tls_key_path"` CAPath string `yaml:"tls_ca_path"` + ServerName string `yaml:"tls_server_name"` InsecureSkipVerify bool `yaml:"tls_insecure_skip_verify"` } @@ -29,18 +30,15 @@ func (cfg *ClientConfig) RegisterFlagsWithPrefix(prefix string, f *flag.FlagSet) f.StringVar(&cfg.CertPath, prefix+".tls-cert-path", "", "Path to the client certificate file, which will be used for authenticating with the server. Also requires the key path to be configured.") f.StringVar(&cfg.KeyPath, prefix+".tls-key-path", "", "Path to the key file for the client certificate. Also requires the client certificate to be configured.") f.StringVar(&cfg.CAPath, prefix+".tls-ca-path", "", "Path to the CA certificates file to validate server certificate against. 
If not set, the host's root CA certificates are used.") + f.StringVar(&cfg.ServerName, prefix+".tls-server-name", "", "Override the expected name on the server certificate.") f.BoolVar(&cfg.InsecureSkipVerify, prefix+".tls-insecure-skip-verify", false, "Skip validating server certificate.") } // GetTLSConfig initialises tls.Config from config options func (cfg *ClientConfig) GetTLSConfig() (*tls.Config, error) { - // no tls config given at all - if cfg.CertPath == "" && cfg.KeyPath == "" && cfg.CAPath == "" { - return nil, nil - } - config := &tls.Config{ InsecureSkipVerify: cfg.InsecureSkipVerify, + ServerName: cfg.ServerName, } // read ca certificates @@ -75,11 +73,15 @@ func (cfg *ClientConfig) GetTLSConfig() (*tls.Config, error) { } // GetGRPCDialOptions creates GRPC DialOptions for TLS -func (cfg *ClientConfig) GetGRPCDialOptions() ([]grpc.DialOption, error) { - if tlsConfig, err := cfg.GetTLSConfig(); err != nil { +func (cfg *ClientConfig) GetGRPCDialOptions(enabled bool) ([]grpc.DialOption, error) { + if !enabled { + return []grpc.DialOption{grpc.WithInsecure()}, nil + } + + tlsConfig, err := cfg.GetTLSConfig() + if err != nil { return nil, errors.Wrap(err, "error creating grpc dial options") - } else if tlsConfig != nil { - return []grpc.DialOption{grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))}, nil } - return []grpc.DialOption{grpc.WithInsecure()}, nil + + return []grpc.DialOption{grpc.WithTransportCredentials(credentials.NewTLS(tlsConfig))}, nil } diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go index 1a99f9d8f16..66034e9c74a 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/limits.go @@ -5,6 +5,7 @@ import ( "flag" "time" + "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/relabel" "github.com/cortexproject/cortex/pkg/util/flagext" @@ -37,6 +38,7 @@ type Limits struct { AcceptHASamples bool `yaml:"accept_ha_samples"` HAClusterLabel string `yaml:"ha_cluster_label"` HAReplicaLabel string `yaml:"ha_replica_label"` + HAMaxClusters int `yaml:"ha_max_clusters"` DropLabels flagext.StringSlice `yaml:"drop_labels"` MaxLabelNameLength int `yaml:"max_label_name_length"` MaxLabelValueLength int `yaml:"max_label_value_length"` @@ -66,13 +68,13 @@ type Limits struct { MaxGlobalMetadataPerMetric int `yaml:"max_global_metadata_per_metric"` // Querier enforced limits. - MaxChunksPerQuery int `yaml:"max_chunks_per_query"` - MaxQueryLookback time.Duration `yaml:"max_query_lookback"` - MaxQueryLength time.Duration `yaml:"max_query_length"` - MaxQueryParallelism int `yaml:"max_query_parallelism"` - CardinalityLimit int `yaml:"cardinality_limit"` - MaxCacheFreshness time.Duration `yaml:"max_cache_freshness"` - MaxQueriersPerTenant int `yaml:"max_queriers_per_tenant"` + MaxChunksPerQuery int `yaml:"max_chunks_per_query"` + MaxQueryLookback model.Duration `yaml:"max_query_lookback"` + MaxQueryLength time.Duration `yaml:"max_query_length"` + MaxQueryParallelism int `yaml:"max_query_parallelism"` + CardinalityLimit int `yaml:"cardinality_limit"` + MaxCacheFreshness time.Duration `yaml:"max_cache_freshness"` + MaxQueriersPerTenant int `yaml:"max_queriers_per_tenant"` // Ruler defaults and limits. 
 	RulerEvaluationDelay time.Duration `yaml:"ruler_evaluation_delay_duration"`
@@ -97,6 +99,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
 	f.BoolVar(&l.AcceptHASamples, "distributor.ha-tracker.enable-for-all-users", false, "Flag to enable, for all users, handling of samples with external labels identifying replicas in an HA Prometheus setup.")
 	f.StringVar(&l.HAClusterLabel, "distributor.ha-tracker.cluster", "cluster", "Prometheus label to look for in samples to identify a Prometheus HA cluster.")
 	f.StringVar(&l.HAReplicaLabel, "distributor.ha-tracker.replica", "__replica__", "Prometheus label to look for in samples to identify a Prometheus HA replica.")
+	f.IntVar(&l.HAMaxClusters, "distributor.ha-tracker.max-clusters", 0, "Maximum number of clusters that the HA tracker will keep track of for a single user. 0 to disable the limit.")
 	f.Var(&l.DropLabels, "distributor.drop-label", "This flag can be used to specify label names to drop during sample ingestion within the distributor and can be repeated in order to drop multiple labels.")
 	f.IntVar(&l.MaxLabelNameLength, "validation.max-length-label-name", 1024, "Maximum length accepted for label names")
 	f.IntVar(&l.MaxLabelValueLength, "validation.max-length-label-value", 2048, "Maximum length accepted for label value. This setting also applies to the metric name")
@@ -123,7 +126,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) {
 	f.IntVar(&l.MaxChunksPerQuery, "store.query-chunk-limit", 2e6, "Maximum number of chunks that can be fetched in a single query. This limit is enforced when fetching chunks from the long-term storage. When running the Cortex chunks storage, this limit is enforced in the querier, while when running the Cortex blocks storage this limit is both enforced in the querier and store-gateway. 0 to disable.")
 	f.DurationVar(&l.MaxQueryLength, "store.max-query-length", 0, "Limit the query time range (end - start time). This limit is enforced in the query-frontend (on the received query), in the querier (on the query possibly split by the query-frontend) and in the chunks storage. 0 to disable.")
-	f.DurationVar(&l.MaxQueryLookback, "querier.max-query-lookback", 0, "Limit how long back data (series and metadata) can be queried, up until duration ago. This limit is enforced in the query-frontend, querier and ruler. If the requested time range is outside the allowed range, the request will not fail but will be manipulated to only query data within the allowed time range. 0 to disable.")
+	f.Var(&l.MaxQueryLookback, "querier.max-query-lookback", "Limit how long back data (series and metadata) can be queried, up until duration ago. This limit is enforced in the query-frontend, querier and ruler. If the requested time range is outside the allowed range, the request will not fail but will be manipulated to only query data within the allowed time range. 0 to disable.")
 	f.IntVar(&l.MaxQueryParallelism, "querier.max-query-parallelism", 14, "Maximum number of split queries that will be scheduled in parallel by the frontend.")
 	f.IntVar(&l.CardinalityLimit, "store.cardinality-limit", 1e5, "Cardinality limit for index queries. This limit is ignored when running the Cortex blocks storage. 0 to disable.")
 	f.DurationVar(&l.MaxCacheFreshness, "frontend.max-cache-freshness", 1*time.Minute, "Most recent allowed cacheable result per-tenant, to prevent caching very recent results that might still be in flux.")
@@ -312,7 +315,7 @@ func (o *Overrides) MaxChunksPerQuery(userID string) int {
 
 // MaxQueryLookback returns the max lookback period of queries.
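+//
+// With MaxQueryLookback now a model.Duration, the value can be expressed in
+// Prometheus duration notation (e.g. "30d") as well as plain Go durations;
+// a hedged sketch of a per-tenant override in the runtime config (the tenant
+// name is illustrative):
+//
+//	overrides:
+//	  tenant-1:
+//	    max_query_lookback: 30d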
 func (o *Overrides) MaxQueryLookback(userID string) time.Duration {
-	return o.getOverridesForUser(userID).MaxQueryLookback
+	return time.Duration(o.getOverridesForUser(userID).MaxQueryLookback)
 }
 
 // MaxQueryLength returns the limit of the length (in time) of a query.
@@ -412,6 +415,11 @@ func (o *Overrides) StoreGatewayTenantShardSize(userID string) int {
 	return o.getOverridesForUser(userID).StoreGatewayTenantShardSize
 }
 
+// MaxHAClusters returns the maximum number of clusters that the HA tracker will track for a user.
+func (o *Overrides) MaxHAClusters(user string) int {
+	return o.getOverridesForUser(user).HAMaxClusters
+}
+
 func (o *Overrides) getOverridesForUser(userID string) *Limits {
 	if o.tenantLimits != nil {
 		l := o.tenantLimits(userID)
@@ -421,3 +429,68 @@ func (o *Overrides) getOverridesForUser(userID string) *Limits {
 	}
 	return o.defaultLimits
 }
+
+// SmallestPositiveIntPerTenant returns the minimum positive value of the
+// supplied limit function across all given tenants.
+func SmallestPositiveIntPerTenant(tenantIDs []string, f func(string) int) int {
+	var result *int
+	for _, tenantID := range tenantIDs {
+		v := f(tenantID)
+		if result == nil || v < *result {
+			result = &v
+		}
+	}
+	if result == nil {
+		return 0
+	}
+	return *result
+}
+
+// SmallestPositiveNonZeroIntPerTenant returns the minimum positive, non-zero
+// value of the supplied limit function across all given tenants. In many
+// limits a value of 0 means unlimited, so the method returns 0 only if all
+// inputs have a limit of 0 or an empty tenant list is given.
+func SmallestPositiveNonZeroIntPerTenant(tenantIDs []string, f func(string) int) int {
+	var result *int
+	for _, tenantID := range tenantIDs {
+		v := f(tenantID)
+		if v > 0 && (result == nil || v < *result) {
+			result = &v
+		}
+	}
+	if result == nil {
+		return 0
+	}
+	return *result
+}
+
+// SmallestPositiveNonZeroDurationPerTenant returns the minimum positive,
+// non-zero value of the supplied limit function across all given tenants. In
+// many limits a value of 0 means unlimited, so the method returns 0 only if
+// all inputs have a limit of 0 or an empty tenant list is given.
+func SmallestPositiveNonZeroDurationPerTenant(tenantIDs []string, f func(string) time.Duration) time.Duration {
+	var result *time.Duration
+	for _, tenantID := range tenantIDs {
+		v := f(tenantID)
+		if v > 0 && (result == nil || v < *result) {
+			result = &v
+		}
+	}
+	if result == nil {
+		return 0
+	}
+	return *result
+}
+
+// MaxDurationPerTenant returns the maximum duration per tenant. If no
+// tenants are given, it returns time.Duration(0).
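+//
+// A hedged usage sketch (tenantIDs and overrides are assumed to be in scope;
+// MaxQueryLength is the per-tenant getter defined above):
+//
+//	longest := MaxDurationPerTenant(tenantIDs, overrides.MaxQueryLength)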
+func MaxDurationPerTenant(tenantIDs []string, f func(string) time.Duration) time.Duration { + result := time.Duration(0) + for _, tenantID := range tenantIDs { + v := f(tenantID) + if v > result { + result = v + } + } + return result +} diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go index 3ae4f54b4f8..477d0e934a2 100644 --- a/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go +++ b/vendor/github.com/cortexproject/cortex/pkg/util/validation/validate.go @@ -6,11 +6,14 @@ import ( "strings" "time" + "github.com/go-kit/kit/log" + "github.com/go-kit/kit/log/level" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/weaveworks/common/httpgrpc" "github.com/cortexproject/cortex/pkg/ingester/client" + "github.com/cortexproject/cortex/pkg/util" "github.com/cortexproject/cortex/pkg/util/extract" ) @@ -33,7 +36,7 @@ const ( errInvalidLabel = "sample invalid label: %.200q metric %.200q" errLabelNameTooLong = "label name too long: %.200q metric %.200q" errLabelValueTooLong = "label value too long: %.200q metric %.200q" - errTooManyLabels = "sample for '%s' has %d label names; limit %d" + errTooManyLabels = "series has too many labels (actual: %d, limit: %d) series: '%s'" errTooOld = "sample for '%s' has timestamp too old: %d" errTooNew = "sample for '%s' has timestamp too new: %d" errDuplicateLabelName = "duplicate label name: %.200q metric %.200q" @@ -56,6 +59,9 @@ const ( // RateLimited is one of the values for the reason to discard samples. // Declared here to avoid duplication in ingester and distributor. RateLimited = "rate_limited" + + // Too many HA clusters is one of the reasons for discarding samples. + TooManyHAClusters = "too_many_ha_clusters" ) // DiscardedSamples is a metric of the number of discarded samples, by reason. 
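+//
+// The new TooManyHAClusters reason above plugs into this accounting like the
+// existing reasons; a minimal sketch (userID assumed to be in scope):
+//
+//	DiscardedSamples.WithLabelValues(TooManyHAClusters, userID).Inc()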
@@ -129,7 +135,7 @@ func ValidateLabels(cfg LabelValidationConfig, userID string, ls []client.LabelA
 	numLabelNames := len(ls)
 	if numLabelNames > cfg.MaxLabelNamesPerSeries(userID) {
 		DiscardedSamples.WithLabelValues(maxLabelNamesPerSeries, userID).Inc()
-		return httpgrpc.Errorf(http.StatusBadRequest, errTooManyLabels, client.FromLabelAdaptersToMetric(ls).String(), numLabelNames, cfg.MaxLabelNamesPerSeries(userID))
+		return httpgrpc.Errorf(http.StatusBadRequest, errTooManyLabels, numLabelNames, cfg.MaxLabelNamesPerSeries(userID), client.FromLabelAdaptersToMetric(ls).String())
 	}
 
 	maxLabelNameLength := cfg.MaxLabelNameLength(userID)
@@ -235,3 +241,14 @@ func formatLabelSet(ls []client.LabelAdapter) string {
 
 	return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", "))
 }
+
+func DeletePerUserValidationMetrics(userID string, log log.Logger) {
+	filter := map[string]string{"user": userID}
+
+	if err := util.DeleteMatchingLabels(DiscardedSamples, filter); err != nil {
+		level.Warn(log).Log("msg", "failed to remove cortex_discarded_samples_total metric for user", "user", userID, "err", err)
+	}
+	if err := util.DeleteMatchingLabels(DiscardedMetadata, filter); err != nil {
+		level.Warn(log).Log("msg", "failed to remove cortex_discarded_metadata_total metric for user", "user", userID, "err", err)
+	}
+}
diff --git a/vendor/github.com/cortexproject/cortex/pkg/util/yaml.go b/vendor/github.com/cortexproject/cortex/pkg/util/yaml.go
new file mode 100644
index 00000000000..bb8b4d802ab
--- /dev/null
+++ b/vendor/github.com/cortexproject/cortex/pkg/util/yaml.go
@@ -0,0 +1,19 @@
+package util
+
+import "gopkg.in/yaml.v2"
+
+// YAMLMarshalUnmarshal is a utility function that converts a YAML-serializable
+// value into a map by marshalling and then unmarshalling the parameter.
+func YAMLMarshalUnmarshal(in interface{}) (map[interface{}]interface{}, error) {
+	yamlBytes, err := yaml.Marshal(in)
+	if err != nil {
+		return nil, err
+	}
+
+	object := make(map[interface{}]interface{})
+	if err := yaml.Unmarshal(yamlBytes, object); err != nil {
+		return nil, err
+	}
+
+	return object, nil
+}
diff --git a/vendor/github.com/golang/snappy/decode_arm64.s b/vendor/github.com/golang/snappy/decode_arm64.s
index bfafa0ccfce..7a3ead17eac 100644
--- a/vendor/github.com/golang/snappy/decode_arm64.s
+++ b/vendor/github.com/golang/snappy/decode_arm64.s
@@ -70,7 +70,7 @@ loop:
 	// x := uint32(src[s] >> 2)
 	// switch
 	MOVW $60, R1
-	ADD R4>>2, ZR, R4
+	LSRW $2, R4, R4
 	CMPW R4, R1
 	BLS tagLit60Plus
 
@@ -111,13 +111,12 @@ doLit:
 	// is contiguous in memory and so it needs to leave enough source bytes to
 	// read the next tag without refilling buffers, but Go's Decode assumes
 	// contiguousness (the src argument is a []byte).
-	MOVD $16, R1
-	CMP R1, R4
-	BGT callMemmove
-	CMP R1, R2
-	BLT callMemmove
-	CMP R1, R3
-	BLT callMemmove
+	CMP $16, R4
+	BGT callMemmove
+	CMP $16, R2
+	BLT callMemmove
+	CMP $16, R3
+	BLT callMemmove
 
 	// !!! Implement the copy from src to dst as a 16-byte load and store.
 	// (Decode's documentation says that dst and src must not overlap.)
@@ -130,9 +129,8 @@ doLit:
 	// Note that on arm64, it is legal and cheap to issue unaligned 8-byte or
 	// 16-byte loads and stores. This technique probably wouldn't be as
 	// effective on architectures that are fussier about alignment.
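+	// (The 16-byte copy below now uses a paired integer load/store, LDP/STP,
+	// in place of the former VLD1/VST1 SIMD pair.)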
- - VLD1 0(R6), [V0.B16] - VST1 [V0.B16], 0(R7) + LDP 0(R6), (R14, R15) + STP (R14, R15), 0(R7) // d += length // s += length @@ -210,8 +208,7 @@ tagLit61: B doLit tagLit62Plus: - MOVW $62, R1 - CMPW R1, R4 + CMPW $62, R4 BHI tagLit63 // case x == 62: @@ -273,10 +270,9 @@ tagCopy: // We have a copy tag. We assume that: // - R3 == src[s] & 0x03 // - R4 == src[s] - MOVD $2, R1 - CMP R1, R3 - BEQ tagCopy2 - BGT tagCopy4 + CMP $2, R3 + BEQ tagCopy2 + BGT tagCopy4 // case tagCopy1: // s += 2 @@ -346,13 +342,11 @@ doCopy: // } // copy 16 bytes // d += length - MOVD $16, R1 - MOVD $8, R0 - CMP R1, R4 + CMP $16, R4 BGT slowForwardCopy - CMP R0, R5 + CMP $8, R5 BLT slowForwardCopy - CMP R1, R14 + CMP $16, R14 BLT slowForwardCopy MOVD 0(R15), R2 MOVD R2, 0(R7) @@ -426,8 +420,7 @@ makeOffsetAtLeast8: // // The two previous lines together means that d-offset, and therefore // // R15, is unchanged. // } - MOVD $8, R1 - CMP R1, R5 + CMP $8, R5 BGE fixUpSlowForwardCopy MOVD (R15), R3 MOVD R3, (R7) @@ -477,9 +470,7 @@ verySlowForwardCopy: ADD $1, R15, R15 ADD $1, R7, R7 SUB $1, R4, R4 - MOVD $0, R1 - CMP R1, R4 - BNE verySlowForwardCopy + CBNZ R4, verySlowForwardCopy B loop // The code above handles copy tags. diff --git a/vendor/github.com/golang/snappy/encode_arm64.s b/vendor/github.com/golang/snappy/encode_arm64.s index 1f565ee75f2..bf83667d711 100644 --- a/vendor/github.com/golang/snappy/encode_arm64.s +++ b/vendor/github.com/golang/snappy/encode_arm64.s @@ -35,11 +35,9 @@ TEXT ·emitLiteral(SB), NOSPLIT, $32-56 MOVW R3, R4 SUBW $1, R4, R4 - MOVW $60, R2 - CMPW R2, R4 + CMPW $60, R4 BLT oneByte - MOVW $256, R2 - CMPW R2, R4 + CMPW $256, R4 BLT twoBytes threeBytes: @@ -98,8 +96,7 @@ TEXT ·emitCopy(SB), NOSPLIT, $0-48 loop0: // for length >= 68 { etc } - MOVW $68, R2 - CMPW R2, R3 + CMPW $68, R3 BLT step1 // Emit a length 64 copy, encoded as 3 bytes. @@ -112,9 +109,8 @@ loop0: step1: // if length > 64 { etc } - MOVD $64, R2 - CMP R2, R3 - BLE step2 + CMP $64, R3 + BLE step2 // Emit a length 60 copy, encoded as 3 bytes. MOVD $0xee, R2 @@ -125,11 +121,9 @@ step1: step2: // if length >= 12 || offset >= 2048 { goto step3 } - MOVD $12, R2 - CMP R2, R3 + CMP $12, R3 BGE step3 - MOVW $2048, R2 - CMPW R2, R11 + CMPW $2048, R11 BGE step3 // Emit the remaining copy, encoded as 2 bytes. @@ -295,27 +289,24 @@ varTable: // var table [maxTableSize]uint16 // // In the asm code, unlike the Go code, we can zero-initialize only the - // first tableSize elements. Each uint16 element is 2 bytes and each VST1 - // writes 64 bytes, so we can do only tableSize/32 writes instead of the - // 2048 writes that would zero-initialize all of table's 32768 bytes. - // This clear could overrun the first tableSize elements, but it won't - // overrun the allocated stack size. + // first tableSize elements. Each uint16 element is 2 bytes and each + // iterations writes 64 bytes, so we can do only tableSize/32 writes + // instead of the 2048 writes that would zero-initialize all of table's + // 32768 bytes. This clear could overrun the first tableSize elements, but + // it won't overrun the allocated stack size. ADD $128, RSP, R17 MOVD R17, R4 // !!! 
R6 = &src[tableSize] ADD R6<<1, R17, R6 - // zero the SIMD registers - VEOR V0.B16, V0.B16, V0.B16 - VEOR V1.B16, V1.B16, V1.B16 - VEOR V2.B16, V2.B16, V2.B16 - VEOR V3.B16, V3.B16, V3.B16 - memclr: - VST1.P [V0.B16, V1.B16, V2.B16, V3.B16], 64(R4) - CMP R4, R6 - BHI memclr + STP.P (ZR, ZR), 64(R4) + STP (ZR, ZR), -48(R4) + STP (ZR, ZR), -32(R4) + STP (ZR, ZR), -16(R4) + CMP R4, R6 + BHI memclr // !!! R6 = &src[0] MOVD R7, R6 @@ -404,8 +395,7 @@ fourByteMatch: // on inputMargin in encode.go. MOVD R7, R3 SUB R10, R3, R3 - MOVD $16, R2 - CMP R2, R3 + CMP $16, R3 BLE emitLiteralFastPath // ---------------------------------------- @@ -454,18 +444,21 @@ inlineEmitLiteralMemmove: MOVD R3, 24(RSP) // Finish the "d +=" part of "d += emitLiteral(etc)". - ADD R3, R8, R8 - MOVD R7, 80(RSP) - MOVD R8, 88(RSP) - MOVD R15, 120(RSP) - CALL runtime·memmove(SB) - MOVD 64(RSP), R5 - MOVD 72(RSP), R6 - MOVD 80(RSP), R7 - MOVD 88(RSP), R8 - MOVD 96(RSP), R9 - MOVD 120(RSP), R15 - B inner1 + ADD R3, R8, R8 + MOVD R7, 80(RSP) + MOVD R8, 88(RSP) + MOVD R15, 120(RSP) + CALL runtime·memmove(SB) + MOVD 64(RSP), R5 + MOVD 72(RSP), R6 + MOVD 80(RSP), R7 + MOVD 88(RSP), R8 + MOVD 96(RSP), R9 + MOVD 120(RSP), R15 + ADD $128, RSP, R17 + MOVW $0xa7bd, R16 + MOVKW $(0x1e35<<16), R16 + B inner1 inlineEmitLiteralEnd: // End inline of the emitLiteral call. @@ -489,9 +482,9 @@ emitLiteralFastPath: // Note that on arm64, it is legal and cheap to issue unaligned 8-byte or // 16-byte loads and stores. This technique probably wouldn't be as // effective on architectures that are fussier about alignment. - VLD1 0(R10), [V0.B16] - VST1 [V0.B16], 0(R8) - ADD R3, R8, R8 + LDP 0(R10), (R0, R1) + STP (R0, R1), 0(R8) + ADD R3, R8, R8 inner1: // for { etc } diff --git a/vendor/github.com/google/go-cmp/cmp/compare.go b/vendor/github.com/google/go-cmp/cmp/compare.go index 6656186846e..86d0903b8b5 100644 --- a/vendor/github.com/google/go-cmp/cmp/compare.go +++ b/vendor/github.com/google/go-cmp/cmp/compare.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // Package cmp determines equality of values. // @@ -100,8 +100,8 @@ func Equal(x, y interface{}, opts ...Option) bool { // same input values and options. // // The output is displayed as a literal in pseudo-Go syntax. -// At the start of each line, a "-" prefix indicates an element removed from y, -// a "+" prefix to indicates an element added to y, and the lack of a prefix +// At the start of each line, a "-" prefix indicates an element removed from x, +// a "+" prefix to indicates an element added from y, and the lack of a prefix // indicates an element common to both x and y. If possible, the output // uses fmt.Stringer.String or error.Error methods to produce more humanly // readable outputs. In such cases, the string is prefixed with either an diff --git a/vendor/github.com/google/go-cmp/cmp/export_panic.go b/vendor/github.com/google/go-cmp/cmp/export_panic.go index dfa5d213769..5ff0b4218c6 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_panic.go +++ b/vendor/github.com/google/go-cmp/cmp/export_panic.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. 
// +build purego diff --git a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go index 351f1a34b46..21eb54858e0 100644 --- a/vendor/github.com/google/go-cmp/cmp/export_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/export_unsafe.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // +build !purego diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go index fe98dcc6774..1daaaacc5ee 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_disable.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // +build !cmp_debug diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go index 597b6ae56b1..4b91dbcacae 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/debug_enable.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // +build cmp_debug diff --git a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go index 730e223ee7b..bc196b16cfa 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // Package diff implements an algorithm for producing edit-scripts. // The edit-script is a sequence of operations needed to transform one list @@ -119,7 +119,7 @@ func (r Result) Similar() bool { return r.NumSame+1 >= r.NumDiff } -var randInt = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) +var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0 // Difference reports whether two lists of lengths nx and ny are equal // given the definition of equality provided as f. @@ -168,17 +168,6 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // A vertical edge is equivalent to inserting a symbol from list Y. // A diagonal edge is equivalent to a matching symbol between both X and Y. - // To ensure flexibility in changing the algorithm in the future, - // introduce some degree of deliberate instability. - // This is achieved by fiddling the zigzag iterator to start searching - // the graph starting from the bottom-right versus than the top-left. - // The result may differ depending on the starting search location, - // but still produces a valid edit script. 
- zigzagInit := randInt // either 0 or 1 - if flags.Deterministic { - zigzagInit = 0 - } - // Invariants: // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny @@ -197,6 +186,11 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // approximately the square-root of the search budget. searchBudget := 4 * (nx + ny) // O(n) + // Running the tests with the "cmp_debug" build tag prints a visualization + // of the algorithm running in real-time. This is educational for + // understanding how the algorithm works. See debug_enable.go. + f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) + // The algorithm below is a greedy, meet-in-the-middle algorithm for // computing sub-optimal edit-scripts between two lists. // @@ -214,22 +208,28 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { // frontier towards the opposite corner. // • This algorithm terminates when either the X coordinates or the // Y coordinates of the forward and reverse frontier points ever intersect. - // + // This algorithm is correct even if searching only in the forward direction // or in the reverse direction. We do both because it is commonly observed // that two lists commonly differ because elements were added to the front // or end of the other list. // - // Running the tests with the "cmp_debug" build tag prints a visualization - // of the algorithm running in real-time. This is educational for - // understanding how the algorithm works. See debug_enable.go. - f = debug.Begin(nx, ny, f, &fwdPath.es, &revPath.es) - for { + // Non-deterministically start with either the forward or reverse direction + // to introduce some deliberate instability so that we have the flexibility + // to change this algorithm in the future. + if flags.Deterministic || randBool { + goto forwardSearch + } else { + goto reverseSearch + } + +forwardSearch: + { // Forward search from the beginning. if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { - break + goto finishSearch } - for stop1, stop2, i := false, false, zigzagInit; !(stop1 && stop2) && searchBudget > 0; i++ { + for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { // Search in a diagonal pattern for a match. z := zigzag(i) p := point{fwdFrontier.X + z, fwdFrontier.Y - z} @@ -262,10 +262,14 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { } else { fwdFrontier.Y++ } + goto reverseSearch + } +reverseSearch: + { // Reverse search from the end. if fwdFrontier.X >= revFrontier.X || fwdFrontier.Y >= revFrontier.Y || searchBudget == 0 { - break + goto finishSearch } for stop1, stop2, i := false, false, 0; !(stop1 && stop2) && searchBudget > 0; i++ { // Search in a diagonal pattern for a match. @@ -300,8 +304,10 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) { } else { revFrontier.Y-- } + goto forwardSearch } +finishSearch: // Join the forward and reverse paths and then append the reverse path. fwdPath.connect(revPath.point, f) for i := len(revPath.es) - 1; i >= 0; i-- { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go index a9e7fc0b5b3..d8e459c9b93 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/flags.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package flags diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go index 01aed0a1532..82d1d7fbf8a 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_legacy.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // +build !go1.10 diff --git a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go index c0b667f58b0..8646f052934 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/flags/toolchain_recent.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // +build go1.10 diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go index ace1dbe86e5..d127d436230 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // Package function provides functionality for identifying function types. package function diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go index 8228e7d512a..b6c12cefb47 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/name.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/name.go @@ -1,6 +1,6 @@ // Copyright 2020, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package value diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go index e9e384a1c89..44f4a5afddc 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_purego.go @@ -1,6 +1,6 @@ // Copyright 2018, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // +build purego diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go index b50c17ec725..a605953d466 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/pointer_unsafe.go @@ -1,6 +1,6 @@ // Copyright 2018, The Go Authors. All rights reserved. 
// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. // +build !purego diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go index 24fbae6e3c5..98533b036cc 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/sort.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package value diff --git a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go index 06a8ffd036d..9147a299731 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/value/zero.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package value diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index 4b0407a7f88..e57b9eb5392 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/path.go b/vendor/github.com/google/go-cmp/cmp/path.go index 603dbb0026e..3d45c1a47f2 100644 --- a/vendor/github.com/google/go-cmp/cmp/path.go +++ b/vendor/github.com/google/go-cmp/cmp/path.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/report.go b/vendor/github.com/google/go-cmp/cmp/report.go index aafcb363545..f43cd12eb5f 100644 --- a/vendor/github.com/google/go-cmp/cmp/report.go +++ b/vendor/github.com/google/go-cmp/cmp/report.go @@ -1,6 +1,6 @@ // Copyright 2017, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/report_compare.go b/vendor/github.com/google/go-cmp/cmp/report_compare.go index 9e2180964f1..a6c070cfcd9 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_compare.go +++ b/vendor/github.com/google/go-cmp/cmp/report_compare.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. 
package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/report_references.go b/vendor/github.com/google/go-cmp/cmp/report_references.go index d620c2c20e7..be31b33a9e1 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_references.go +++ b/vendor/github.com/google/go-cmp/cmp/report_references.go @@ -1,6 +1,6 @@ // Copyright 2020, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/report_reflect.go b/vendor/github.com/google/go-cmp/cmp/report_reflect.go index 786f671269c..33f03577f98 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_reflect.go +++ b/vendor/github.com/google/go-cmp/cmp/report_reflect.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp @@ -351,6 +351,8 @@ func formatMapKey(v reflect.Value, disambiguate bool, ptrs *pointerReferences) s opts.PrintAddresses = disambiguate opts.AvoidStringer = disambiguate opts.QualifiedNames = disambiguate + opts.VerbosityLevel = maxVerbosityPreset + opts.LimitVerbosity = true s := opts.FormatValue(v, reflect.Map, ptrs).String() return strings.TrimSpace(s) } diff --git a/vendor/github.com/google/go-cmp/cmp/report_slices.go b/vendor/github.com/google/go-cmp/cmp/report_slices.go index 35315dad355..da04caf1649 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_slices.go +++ b/vendor/github.com/google/go-cmp/cmp/report_slices.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/report_text.go b/vendor/github.com/google/go-cmp/cmp/report_text.go index 8b12c05cd4f..0fd46d7ffb6 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_text.go +++ b/vendor/github.com/google/go-cmp/cmp/report_text.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp diff --git a/vendor/github.com/google/go-cmp/cmp/report_value.go b/vendor/github.com/google/go-cmp/cmp/report_value.go index 83031a7f507..668d470fd83 100644 --- a/vendor/github.com/google/go-cmp/cmp/report_value.go +++ b/vendor/github.com/google/go-cmp/cmp/report_value.go @@ -1,6 +1,6 @@ // Copyright 2019, The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.md file. +// license that can be found in the LICENSE file. package cmp diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index 4dcc27f48eb..5ab6e9b9b08 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -35,7 +35,10 @@ func (p *Profile) Compact() *Profile { // functions and mappings. Profiles must have identical profile sample // and period types or the merge will fail. profile.Period of the // resulting profile will be the maximum of all profiles, and -// profile.TimeNanos will be the earliest nonzero one. 
+// profile.TimeNanos will be the earliest nonzero one. Merges are
+// associative, with the caveat that the first profile receives some
+// specialization in how headers are combined. There may be other
+// subtleties now or in the future regarding associativity.
 func Merge(srcs []*Profile) (*Profile, error) {
 	if len(srcs) == 0 {
 		return nil, fmt.Errorf("no profiles to merge")
diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go
index d94d8b3d1cc..2590c8ddb42 100644
--- a/vendor/github.com/google/pprof/profile/profile.go
+++ b/vendor/github.com/google/pprof/profile/profile.go
@@ -22,6 +22,7 @@ import (
 	"fmt"
 	"io"
 	"io/ioutil"
+	"math"
 	"path/filepath"
 	"regexp"
 	"sort"
@@ -712,7 +713,8 @@ func (s *Sample) DiffBaseSample() bool {
 	return s.HasLabel("pprof::base", "true")
 }
 
-// Scale multiplies all sample values in a profile by a constant.
+// Scale multiplies all sample values in a profile by a constant and keeps
+// only samples that have at least one non-zero value.
 func (p *Profile) Scale(ratio float64) {
 	if ratio == 1 {
 		return
@@ -724,7 +726,8 @@ func (p *Profile) Scale(ratio float64) {
 	p.ScaleN(ratios)
 }
 
-// ScaleN multiplies each sample values in a sample by a different amount.
+// ScaleN multiplies each sample value in a sample by a different amount
+// and keeps only samples that have at least one non-zero value.
 func (p *Profile) ScaleN(ratios []float64) error {
 	if len(p.SampleType) != len(ratios) {
 		return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
@@ -739,13 +742,22 @@ func (p *Profile) ScaleN(ratios []float64) error {
 	if allOnes {
 		return nil
 	}
+	fillIdx := 0
 	for _, s := range p.Sample {
+		keepSample := false
 		for i, v := range s.Value {
 			if ratios[i] != 1 {
-				s.Value[i] = int64(float64(v) * ratios[i])
+				val := int64(math.Round(float64(v) * ratios[i]))
+				s.Value[i] = val
+				keepSample = keepSample || val != 0
 			}
 		}
+		if keepSample {
+			p.Sample[fillIdx] = s
+			fillIdx++
+		}
 	}
+	p.Sample = p.Sample[:fillIdx]
 	return nil
 }
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
index 9d92c11f16f..f765a46f915 100644
--- a/vendor/github.com/google/uuid/README.md
+++ b/vendor/github.com/google/uuid/README.md
@@ -16,4 +16,4 @@ change is the ability to represent an invalid UUID (vs a NIL UUID).
 Full `go doc` style documentation for the package can be viewed online without
 installing this package by using the GoDoc site here:
-http://godoc.org/github.com/google/uuid
+http://pkg.go.dev/github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go
index 7f9e0c6c0e3..14bd34072b6 100644
--- a/vendor/github.com/google/uuid/marshal.go
+++ b/vendor/github.com/google/uuid/marshal.go
@@ -16,10 +16,11 @@ func (uuid UUID) MarshalText() ([]byte, error) {
 // UnmarshalText implements encoding.TextUnmarshaler.
 func (uuid *UUID) UnmarshalText(data []byte) error {
 	id, err := ParseBytes(data)
-	if err == nil {
-		*uuid = id
+	if err != nil {
+		return err
 	}
-	return err
+	*uuid = id
+	return nil
 }
 
 // MarshalBinary implements encoding.BinaryMarshaler.
diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go
index 199a1ac6540..463109629ee 100644
--- a/vendor/github.com/google/uuid/version1.go
+++ b/vendor/github.com/google/uuid/version1.go
@@ -17,12 +17,6 @@ import (
 //
 // In most cases, New should be used.
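+//
+// A minimal sketch of a call site (illustrative only):
+//
+//	id, err := NewUUID()
+//	if err != nil {
+//		// GetTime failed; id is the zero UUID in this case
+//	}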
func NewUUID() (UUID, error) { - nodeMu.Lock() - if nodeID == zeroID { - setNodeInterface("") - } - nodeMu.Unlock() - var uuid UUID now, seq, err := GetTime() if err != nil { @@ -38,7 +32,13 @@ func NewUUID() (UUID, error) { binary.BigEndian.PutUint16(uuid[4:], timeMid) binary.BigEndian.PutUint16(uuid[6:], timeHi) binary.BigEndian.PutUint16(uuid[8:], seq) + + nodeMu.Lock() + if nodeID == zeroID { + setNodeInterface("") + } copy(uuid[10:], nodeID[:]) + nodeMu.Unlock() return uuid, nil } diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go index 84af91c9f54..c110465db59 100644 --- a/vendor/github.com/google/uuid/version4.go +++ b/vendor/github.com/google/uuid/version4.go @@ -27,8 +27,13 @@ func New() UUID { // equivalent to the odds of creating a few tens of trillions of UUIDs in a // year and having one duplicate. func NewRandom() (UUID, error) { + return NewRandomFromReader(rander) +} + +// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader. +func NewRandomFromReader(r io.Reader) (UUID, error) { var uuid UUID - _, err := io.ReadFull(rander, uuid[:]) + _, err := io.ReadFull(r, uuid[:]) if err != nil { return Nil, err } diff --git a/vendor/github.com/gophercloud/gophercloud/.gitignore b/vendor/github.com/gophercloud/gophercloud/.gitignore deleted file mode 100644 index dd91ed20559..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -**/*.swp -.idea -.vscode diff --git a/vendor/github.com/gophercloud/gophercloud/.travis.yml b/vendor/github.com/gophercloud/gophercloud/.travis.yml deleted file mode 100644 index a6eb99c4a9d..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -language: go -sudo: false -install: -- GO111MODULE=off go get golang.org/x/crypto/ssh -- GO111MODULE=off go get -v -tags 'fixtures acceptance' ./... -- GO111MODULE=off go get github.com/wadey/gocovmerge -- GO111MODULE=off go get github.com/mattn/goveralls -- GO111MODULE=off go get golang.org/x/tools/cmd/goimports -go: -- "1.13" -- "1.14" -- "1.15" -- "tip" -env: - global: - - secure: "xSQsAG5wlL9emjbCdxzz/hYQsSpJ/bABO1kkbwMSISVcJ3Nk0u4ywF+LS4bgeOnwPfmFvNTOqVDu3RwEvMeWXSI76t1piCPcObutb2faKLVD/hLoAS76gYX+Z8yGWGHrSB7Do5vTPj1ERe2UljdrnsSeOXzoDwFxYRaZLX4bBOB4AyoGvRniil5QXPATiA1tsWX1VMicj8a4F8X+xeESzjt1Q5Iy31e7vkptu71bhvXCaoo5QhYwT+pLR9dN0S1b7Ro0KVvkRefmr1lUOSYd2e74h6Lc34tC1h3uYZCS4h47t7v5cOXvMNxinEj2C51RvbjvZI1RLVdkuAEJD1Iz4+Ote46nXbZ//6XRZMZz/YxQ13l7ux1PFjgEB6HAapmF5Xd8PRsgeTU9LRJxpiTJ3P5QJ3leS1va8qnziM5kYipj/Rn+V8g2ad/rgkRox9LSiR9VYZD2Pe45YCb1mTKSl2aIJnV7nkOqsShY5LNB4JZSg7xIffA+9YVDktw8dJlATjZqt7WvJJ49g6A61mIUV4C15q2JPGKTkZzDiG81NtmS7hFa7k0yaE2ELgYocbcuyUcAahhxntYTC0i23nJmEHVNiZmBO3u7EgpWe4KGVfumU+lt12tIn5b3dZRBBUk3QakKKozSK1QPHGpk/AZGrhu7H6l8to6IICKWtDcyMPQ=" - - GO111MODULE=on -before_script: -- go vet ./... 
-script: -- ./script/coverage -- ./script/unittest -- ./script/format -after_success: -- $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=cover.out diff --git a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml deleted file mode 100644 index 5da89829150..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml +++ /dev/null @@ -1,136 +0,0 @@ -- job: - name: gophercloud-unittest - parent: golang-test - description: | - Run gophercloud unit test - run: .zuul/playbooks/gophercloud-unittest/run.yaml - nodeset: ubuntu-xenial-ut - -- job: - name: gophercloud-acceptance-test - parent: golang-test - description: | - Run gophercloud acceptance test on master branch - run: .zuul/playbooks/gophercloud-acceptance-test/run.yaml - timeout: 18000 # 5 hours - nodeset: ubuntu-bionic - -- job: - name: gophercloud-acceptance-test-ironic - parent: golang-test - description: | - Run gophercloud ironic acceptance test on master branch - run: .zuul/playbooks/gophercloud-acceptance-test-ironic/run.yaml - nodeset: ubuntu-bionic - -- job: - name: gophercloud-acceptance-test-ussuri - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on ussuri branch - vars: - global_env: - OS_BRANCH: stable/ussuri - -- job: - name: gophercloud-acceptance-test-train - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on train branch - vars: - global_env: - OS_BRANCH: stable/train - -- job: - name: gophercloud-acceptance-test-stein - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on stein branch - vars: - global_env: - OS_BRANCH: stable/stein - -- job: - name: gophercloud-acceptance-test-rocky - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on rocky branch - vars: - global_env: - OS_BRANCH: stable/rocky - -- job: - name: gophercloud-acceptance-test-queens - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on queens branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/queens - -# NOTE: A Pike-based devstack environment is currently -# not building correctly. This might be a temporary issue. -- job: - name: gophercloud-acceptance-test-pike - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on pike branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/pike - -- job: - name: gophercloud-acceptance-test-ocata - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on ocata branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/ocata - -# NOTE: A Newton-based devstack environment is currently -# not building correctly. This might be a temporary issue. 
-- job: - name: gophercloud-acceptance-test-newton - parent: gophercloud-acceptance-test - description: | - Run gophercloud acceptance test on newton branch - nodeset: ubuntu-xenial - vars: - global_env: - OS_BRANCH: stable/newton - -- project: - name: gophercloud/gophercloud - check: - jobs: - - gophercloud-unittest - - gophercloud-acceptance-test - - gophercloud-acceptance-test-ironic - recheck-newton: - jobs: - - gophercloud-acceptance-test-newton - recheck-ocata: - jobs: - - gophercloud-acceptance-test-ocata - recheck-pike: - jobs: - - gophercloud-acceptance-test-pike - recheck-queens: - jobs: - - gophercloud-acceptance-test-queens - recheck-rocky: - jobs: - - gophercloud-acceptance-test-rocky - recheck-stein: - jobs: - - gophercloud-acceptance-test-stein - recheck-train: - jobs: - - gophercloud-acceptance-test-train - recheck-ussuri: - jobs: - - gophercloud-acceptance-test-ussuri diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md deleted file mode 100644 index b38b1016886..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md +++ /dev/null @@ -1,437 +0,0 @@ -## 0.15.0 (Unreleased) - -## 0.14.0 (November 11, 2020) - -IMPROVEMENTS - -* Added `identity/v3/endpoints.Endpoint.Enabled` [GH-2030](https://github.com/gophercloud/gophercloud/pull/2030) -* Added `containerinfra/v1/clusters.Upgrade` [GH-2032](https://github.com/gophercloud/gophercloud/pull/2032) -* Added `compute/apiversions.List` [GH-2037](https://github.com/gophercloud/gophercloud/pull/2037) -* Added `compute/apiversions.Get` [GH-2037](https://github.com/gophercloud/gophercloud/pull/2037) -* Added `compute/v2/servers.ListOpts.IP` [GH-2038](https://github.com/gophercloud/gophercloud/pull/2038) -* Added `compute/v2/servers.ListOpts.IP6` [GH-2038](https://github.com/gophercloud/gophercloud/pull/2038) -* Added `compute/v2/servers.ListOpts.UserID` [GH-2038](https://github.com/gophercloud/gophercloud/pull/2038) -* Added `dns/v2/transfer/accept.List` [GH-2041](https://github.com/gophercloud/gophercloud/pull/2041) -* Added `dns/v2/transfer/accept.Get` [GH-2041](https://github.com/gophercloud/gophercloud/pull/2041) -* Added `dns/v2/transfer/accept.Create` [GH-2041](https://github.com/gophercloud/gophercloud/pull/2041) -* Added `dns/v2/transfer/requests.List` [GH-2041](https://github.com/gophercloud/gophercloud/pull/2041) -* Added `dns/v2/transfer/requests.Get` [GH-2041](https://github.com/gophercloud/gophercloud/pull/2041) -* Added `dns/v2/transfer/requests.Update` [GH-2041](https://github.com/gophercloud/gophercloud/pull/2041) -* Added `dns/v2/transfer/requests.Delete` [GH-2041](https://github.com/gophercloud/gophercloud/pull/2041) -* Added `baremetal/v1/nodes.RescueWait` [GH-2052](https://github.com/gophercloud/gophercloud/pull/2052) -* Added `baremetal/v1/nodes.Unrescuing` [GH-2052](https://github.com/gophercloud/gophercloud/pull/2052) -* Added `networking/v2/extensions/fwaas_v2/groups.List` [GH-2050](https://github.com/gophercloud/gophercloud/pull/2050) -* Added `networking/v2/extensions/fwaas_v2/groups.Get` [GH-2050](https://github.com/gophercloud/gophercloud/pull/2050) -* Added `networking/v2/extensions/fwaas_v2/groups.Create` [GH-2050](https://github.com/gophercloud/gophercloud/pull/2050) -* Added `networking/v2/extensions/fwaas_v2/groups.Update` [GH-2050](https://github.com/gophercloud/gophercloud/pull/2050) -* Added `networking/v2/extensions/fwaas_v2/groups.Delete` [GH-2050](https://github.com/gophercloud/gophercloud/pull/2050) - -BUG 
FIXES - -* Changed `networking/v2/extensions/layer3/routers.Routes` from `[]Route` to `*[]Route` [GH-2043](https://github.com/gophercloud/gophercloud/pull/2043) - -## 0.13.0 (September 27, 2020) - -IMPROVEMENTS - -* Added `ProtocolTerminatedHTTPS` as a valid listener protocol to `loadbalancer/v2/listeners` [GH-1992](https://github.com/gophercloud/gophercloud/pull/1992) -* Added `objectstorage/v1/objects.CreateTempURLOpts.Timestamp` [GH-1994](https://github.com/gophercloud/gophercloud/pull/1994) -* Added `compute/v2/extensions/schedulerhints.SchedulerHints.DifferentCell` [GH-2012](https://github.com/gophercloud/gophercloud/pull/2012) -* Added `loadbalancer/v2/quotas.Get` [GH-2010](https://github.com/gophercloud/gophercloud/pull/2010) -* Added `messaging/v2/queues.CreateOpts.EnableEncryptMessages` [GH-2016](https://github.com/gophercloud/gophercloud/pull/2016) -* Added `messaging/v2/queues.ListOpts.Name` [GH-2018](https://github.com/gophercloud/gophercloud/pull/2018) -* Added `messaging/v2/queues.ListOpts.WithCount` [GH-2018](https://github.com/gophercloud/gophercloud/pull/2018) -* Added `loadbalancer/v2/quotas.Update` [GH-2023](https://github.com/gophercloud/gophercloud/pull/2023) -* Added `loadbalancer/v2/loadbalancers.ListOpts.AvailabilityZone` [GH-2026](https://github.com/gophercloud/gophercloud/pull/2026) -* Added `loadbalancer/v2/loadbalancers.CreateOpts.AvailabilityZone` [GH-2026](https://github.com/gophercloud/gophercloud/pull/2026) -* Added `loadbalancer/v2/loadbalancers.LoadBalancer.AvailabilityZone` [GH-2026](https://github.com/gophercloud/gophercloud/pull/2026) -* Added `networking/v2/extensions/layer3/routers.ListL3Agents` [GH-2025](https://github.com/gophercloud/gophercloud/pull/2025) - -BUG FIXES - -* Fixed URL escaping in `objectstorage/v1/objects.CreateTempURL` [GH-1994](https://github.com/gophercloud/gophercloud/pull/1994) -* Remove unused `ServiceClient` from `compute/v2/servers.CreateOpts` [GH-2004](https://github.com/gophercloud/gophercloud/pull/2004) -* Changed `objectstorage/v1/objects.CreateOpts.DeleteAfter` from `int` to `int64` [GH-2014](https://github.com/gophercloud/gophercloud/pull/2014) -* Changed `objectstorage/v1/objects.CreateOpts.DeleteAt` from `int` to `int64` [GH-2014](https://github.com/gophercloud/gophercloud/pull/2014) -* Changed `objectstorage/v1/objects.UpdateOpts.DeleteAfter` from `int` to `int64` [GH-2014](https://github.com/gophercloud/gophercloud/pull/2014) -* Changed `objectstorage/v1/objects.UpdateOpts.DeleteAt` from `int` to `int64` [GH-2014](https://github.com/gophercloud/gophercloud/pull/2014) - - -## 0.12.0 (June 25, 2020) - -UPGRADE NOTES - -* The URL used in the `compute/v2/extensions/bootfromvolume` package has been changed from `os-volumes_boot` to `servers`. 
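One practical note on the `DeleteAfter`/`DeleteAt` changes listed under 0.13.0 above: widening them from `int` to `int64` is source-compatible for literals but not for variables. A hypothetical sketch of the caller-side effect (the `objects.Create` signature and `CreateOpts.Content` field are assumptions about the object-storage v1 package; only the `DeleteAfter` type is taken from the changelog entry):

```go
package example

import (
	"strings"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects"
)

// uploadExpiring stores an object that Swift deletes after ttlSeconds.
func uploadExpiring(client *gophercloud.ServiceClient, ttlSeconds int) error {
	opts := objects.CreateOpts{
		Content: strings.NewReader("expires soon"),
		// int64 as of 0.13.0 (GH-2014): an int variable now needs an
		// explicit conversion, whereas literals keep compiling as-is.
		DeleteAfter: int64(ttlSeconds),
	}
	return objects.Create(client, "my-container", "my-object", opts).Err
}
```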
- -IMPROVEMENTS - -* The URL used in the `compute/v2/extensions/bootfromvolume` package has been changed from `os-volumes_boot` to `servers` [GH-1973](https://github.com/gophercloud/gophercloud/pull/1973) -* Modify `baremetal/v1/nodes.LogicalDisk.PhysicalDisks` type to support physical disks hints [GH-1982](https://github.com/gophercloud/gophercloud/pull/1982) -* Added `baremetalintrospection/httpbasic` which provides an HTTP Basic Auth client [GH-1986](https://github.com/gophercloud/gophercloud/pull/1986) -* Added `baremetal/httpbasic` which provides an HTTP Basic Auth client [GH-1983](https://github.com/gophercloud/gophercloud/pull/1983) -* Added `containerinfra/v1/clusters.CreateOpts.MergeLabels` [GH-1985](https://github.com/gophercloud/gophercloud/pull/1985) - -BUG FIXES - -* Changed `containerinfra/v1/clusters.Cluster.HealthStatusReason` from `string` to `map[string]interface{}` [GH-1968](https://github.com/gophercloud/gophercloud/pull/1968) -* Fixed marshalling of `blockstorage/extensions/backups.ImportBackup.Metadata` [GH-1967](https://github.com/gophercloud/gophercloud/pull/1967) -* Fixed typo of "OAUth" to "OAuth" in `identity/v3/extensions/oauth1` [GH-1969](https://github.com/gophercloud/gophercloud/pull/1969) -* Fixed goroutine leak during reauthentication [GH-1978](https://github.com/gophercloud/gophercloud/pull/1978) -* Changed `baremetalintrospection/v1/introspection.RootDiskType.Size` from `int` to `int64` [GH-1988](https://github.com/gophercloud/gophercloud/pull/1988) - -## 0.11.0 (May 14, 2020) - -UPGRADE NOTES - -* Object storage container and object names are now URL encoded [GH-1930](https://github.com/gophercloud/gophercloud/pull/1930) -* All responses now have access to the returned headers. Please report any issues this has caused [GH-1942](https://github.com/gophercloud/gophercloud/pull/1942) -* Changes have been made to the internal HTTP client to ensure response bodies are handled in a way that enables connections to be re-used more efficiently [GH-1952](https://github.com/gophercloud/gophercloud/pull/1952) - -IMPROVEMENTS - -* Added `objectstorage/v1/containers.BulkDelete` [GH-1930](https://github.com/gophercloud/gophercloud/pull/1930) -* Added `objectstorage/v1/objects.BulkDelete` [GH-1930](https://github.com/gophercloud/gophercloud/pull/1930) -* Object storage container and object names are now URL encoded [GH-1930](https://github.com/gophercloud/gophercloud/pull/1930) -* All responses now have access to the returned headers [GH-1942](https://github.com/gophercloud/gophercloud/pull/1942) -* Added `compute/v2/extensions/injectnetworkinfo.InjectNetworkInfo` [GH-1941](https://github.com/gophercloud/gophercloud/pull/1941) -* Added `compute/v2/extensions/resetnetwork.ResetNetwork` [GH-1941](https://github.com/gophercloud/gophercloud/pull/1941) -* Added `identity/v3/extensions/trusts.ListRoles` [GH-1939](https://github.com/gophercloud/gophercloud/pull/1939) -* Added `identity/v3/extensions/trusts.GetRole` [GH-1939](https://github.com/gophercloud/gophercloud/pull/1939) -* Added `identity/v3/extensions/trusts.CheckRole` [GH-1939](https://github.com/gophercloud/gophercloud/pull/1939) -* Added `identity/v3/extensions/oauth1.Create` [GH-1935](https://github.com/gophercloud/gophercloud/pull/1935) -* Added `identity/v3/extensions/oauth1.CreateConsumer` [GH-1935](https://github.com/gophercloud/gophercloud/pull/1935) -* Added `identity/v3/extensions/oauth1.DeleteConsumer` [GH-1935](https://github.com/gophercloud/gophercloud/pull/1935) -* Added 
`identity/v3/extensions/oauth1.ListConsumers` [GH-1935](https://github.com/gophercloud/gophercloud/pull/1935) -* Added `identity/v3/extensions/oauth1.GetConsumer` [GH-1935](https://github.com/gophercloud/gophercloud/pull/1935) -* Added `identity/v3/extensions/oauth1.UpdateConsumer` [GH-1935](https://github.com/gophercloud/gophercloud/pull/1935) -* Added `identity/v3/extensions/oauth1.RequestToken` [GH-1935](https://github.com/gophercloud/gophercloud/pull/1935) -* Added `identity/v3/extensions/oauth1.AuthorizeToken` [GH-1935](https://github.com/gophercloud/gophercloud/pull/1935) -* Added `identity/v3/extensions/oauth1.CreateAccessToken` [GH-1935](https://github.com/gophercloud/gophercloud/pull/1935) -* Added `identity/v3/extensions/oauth1.GetAccessToken` [GH-1935](https://github.com/gophercloud/gophercloud/pull/1935) -* Added `identity/v3/extensions/oauth1.RevokeAccessToken` [GH-1935](https://github.com/gophercloud/gophercloud/pull/1935) -* Added `identity/v3/extensions/oauth1.ListAccessTokens` [GH-1935](https://github.com/gophercloud/gophercloud/pull/1935) -* Added `identity/v3/extensions/oauth1.ListAccessTokenRoles` [GH-1935](https://github.com/gophercloud/gophercloud/pull/1935) -* Added `identity/v3/extensions/oauth1.GetAccessTokenRole` [GH-1935](https://github.com/gophercloud/gophercloud/pull/1935) -* Added `networking/v2/extensions/agents.Update` [GH-1954](https://github.com/gophercloud/gophercloud/pull/1954) -* Added `networking/v2/extensions/agents.Delete` [GH-1954](https://github.com/gophercloud/gophercloud/pull/1954) -* Added `networking/v2/extensions/agents.ScheduleDHCPNetwork` [GH-1954](https://github.com/gophercloud/gophercloud/pull/1954) -* Added `networking/v2/extensions/agents.RemoveDHCPNetwork` [GH-1954](https://github.com/gophercloud/gophercloud/pull/1954) -* Added `identity/v3/projects.CreateOpts.Extra` [GH-1951](https://github.com/gophercloud/gophercloud/pull/1951) -* Added `identity/v3/projects.CreateOpts.Options` [GH-1951](https://github.com/gophercloud/gophercloud/pull/1951) -* Added `identity/v3/projects.UpdateOpts.Extra` [GH-1951](https://github.com/gophercloud/gophercloud/pull/1951) -* Added `identity/v3/projects.UpdateOpts.Options` [GH-1951](https://github.com/gophercloud/gophercloud/pull/1951) -* Added `identity/v3/projects.Project.Extra` [GH-1951](https://github.com/gophercloud/gophercloud/pull/1951) -* Added `identity/v3/projects.Options.Options` [GH-1951](https://github.com/gophercloud/gophercloud/pull/1951) -* Added `imageservice/v2/images.Image.OpenStackImageImportMethods` [GH-1962](https://github.com/gophercloud/gophercloud/pull/1962) -* Added `imageservice/v2/images.Image.OpenStackImageStoreIDs` [GH-1962](https://github.com/gophercloud/gophercloud/pull/1962) - -BUG FIXES - -* Changed`identity/v3/extensions/trusts.Trust.RemainingUses` from `bool` to `int` [GH-1939](https://github.com/gophercloud/gophercloud/pull/1939) -* Changed `identity/v3/applicationcredentials.CreateOpts.ExpiresAt` from `string` to `*time.Time` [GH-1937](https://github.com/gophercloud/gophercloud/pull/1937) -* Fixed issue with unmarshalling/decoding slices of composed structs [GH-1964](https://github.com/gophercloud/gophercloud/pull/1964) - -## 0.10.0 (April 12, 2020) - -UPGRADE NOTES - -* The various `IDFromName` convenience functions have been moved to https://github.com/gophercloud/utils [GH-1897](https://github.com/gophercloud/gophercloud/pull/1897) -* `sharedfilesystems/v2/shares.GetExportLocations` was renamed to `sharedfilesystems/v2/shares.ListExportLocations` 
[GH-1932](https://github.com/gophercloud/gophercloud/pull/1932) - -IMPROVEMENTS - -* Added `blockstorage/extensions/volumeactions.SetBootable` [GH-1891](https://github.com/gophercloud/gophercloud/pull/1891) -* Added `blockstorage/extensions/backups.Export` [GH-1894](https://github.com/gophercloud/gophercloud/pull/1894) -* Added `blockstorage/extensions/backups.Import` [GH-1894](https://github.com/gophercloud/gophercloud/pull/1894) -* Added `placement/v1/resourceproviders.GetTraits` [GH-1899](https://github.com/gophercloud/gophercloud/pull/1899) -* Added the ability to authenticate with Amazon EC2 Credentials [GH-1900](https://github.com/gophercloud/gophercloud/pull/1900) -* Added ability to list Nova services by binary and host [GH-1904](https://github.com/gophercloud/gophercloud/pull/1904) -* Added `compute/v2/extensions/services.Update` [GH-1902](https://github.com/gophercloud/gophercloud/pull/1902) -* Added system scope to v3 authentication [GH-1908](https://github.com/gophercloud/gophercloud/pull/1908) -* Added `identity/v3/extensions/ec2tokens.ValidateS3Token` [GH-1906](https://github.com/gophercloud/gophercloud/pull/1906) -* Added `containerinfra/v1/clusters.Cluster.HealthStatus` [GH-1910](https://github.com/gophercloud/gophercloud/pull/1910) -* Added `containerinfra/v1/clusters.Cluster.HealthStatusReason` [GH-1910](https://github.com/gophercloud/gophercloud/pull/1910) -* Added `loadbalancer/v2/amphorae.Failover` [GH-1912](https://github.com/gophercloud/gophercloud/pull/1912) -* Added `identity/v3/extensions/ec2credentials.List` [GH-1916](https://github.com/gophercloud/gophercloud/pull/1916) -* Added `identity/v3/extensions/ec2credentials.Get` [GH-1916](https://github.com/gophercloud/gophercloud/pull/1916) -* Added `identity/v3/extensions/ec2credentials.Create` [GH-1916](https://github.com/gophercloud/gophercloud/pull/1916) -* Added `identity/v3/extensions/ec2credentials.Delete` [GH-1916](https://github.com/gophercloud/gophercloud/pull/1916) -* Added `ErrUnexpectedResponseCode.ResponseHeader` [GH-1919](https://github.com/gophercloud/gophercloud/pull/1919) -* Added support for TOTP authentication [GH-1922](https://github.com/gophercloud/gophercloud/pull/1922) -* `sharedfilesystems/v2/shares.GetExportLocations` was renamed to `sharedfilesystems/v2/shares.ListExportLocations` [GH-1932](https://github.com/gophercloud/gophercloud/pull/1932) -* Added `sharedfilesystems/v2/shares.GetExportLocation` [GH-1932](https://github.com/gophercloud/gophercloud/pull/1932) -* Added `sharedfilesystems/v2/shares.Revert` [GH-1931](https://github.com/gophercloud/gophercloud/pull/1931) -* Added `sharedfilesystems/v2/shares.ResetStatus` [GH-1931](https://github.com/gophercloud/gophercloud/pull/1931) -* Added `sharedfilesystems/v2/shares.ForceDelete` [GH-1931](https://github.com/gophercloud/gophercloud/pull/1931) -* Added `sharedfilesystems/v2/shares.Unmanage` [GH-1931](https://github.com/gophercloud/gophercloud/pull/1931) -* Added `blockstorage/v3/attachments.Create` [GH-1934](https://github.com/gophercloud/gophercloud/pull/1934) -* Added `blockstorage/v3/attachments.List` [GH-1934](https://github.com/gophercloud/gophercloud/pull/1934) -* Added `blockstorage/v3/attachments.Get` [GH-1934](https://github.com/gophercloud/gophercloud/pull/1934) -* Added `blockstorage/v3/attachments.Update` [GH-1934](https://github.com/gophercloud/gophercloud/pull/1934) -* Added `blockstorage/v3/attachments.Delete` [GH-1934](https://github.com/gophercloud/gophercloud/pull/1934) -* Added `blockstorage/v3/attachments.Complete` 
[GH-1934](https://github.com/gophercloud/gophercloud/pull/1934) - -BUG FIXES - -* Fixed issue with Orchestration `get_file` only being able to read JSON and YAML files [GH-1915](https://github.com/gophercloud/gophercloud/pull/1915) - -## 0.9.0 (March 10, 2020) - -UPGRADE NOTES - -* The way we implement new API result fields added by microversions has changed. Previously, we would declare a dedicated `ExtractFoo` function in a file called `microversions.go`. Now, we are declaring those fields inline of the original result struct as a pointer. [GH-1854](https://github.com/gophercloud/gophercloud/pull/1854) - -* `compute/v2/servers.CreateOpts.Networks` has changed from `[]Network` to `interface{}` in order to support creating servers that have no networks. [GH-1884](https://github.com/gophercloud/gophercloud/pull/1884) - -IMPROVEMENTS - -* Added `compute/v2/extensions/instanceactions.List` [GH-1848](https://github.com/gophercloud/gophercloud/pull/1848) -* Added `compute/v2/extensions/instanceactions.Get` [GH-1848](https://github.com/gophercloud/gophercloud/pull/1848) -* Added `networking/v2/ports.List.FixedIPs` [GH-1849](https://github.com/gophercloud/gophercloud/pull/1849) -* Added `identity/v3/extensions/trusts.List` [GH-1855](https://github.com/gophercloud/gophercloud/pull/1855) -* Added `identity/v3/extensions/trusts.Get` [GH-1855](https://github.com/gophercloud/gophercloud/pull/1855) -* Added `identity/v3/extensions/trusts.Trust.ExpiresAt` [GH-1857](https://github.com/gophercloud/gophercloud/pull/1857) -* Added `identity/v3/extensions/trusts.Trust.DeletedAt` [GH-1857](https://github.com/gophercloud/gophercloud/pull/1857) -* Added `compute/v2/extensions/instanceactions.InstanceActionDetail` [GH-1851](https://github.com/gophercloud/gophercloud/pull/1851) -* Added `compute/v2/extensions/instanceactions.Event` [GH-1851](https://github.com/gophercloud/gophercloud/pull/1851) -* Added `compute/v2/extensions/instanceactions.ListOpts` [GH-1858](https://github.com/gophercloud/gophercloud/pull/1858) -* Added `objectstorage/v1/containers.UpdateOpts.TempURLKey` [GH-1864](https://github.com/gophercloud/gophercloud/pull/1864) -* Added `objectstorage/v1/containers.UpdateOpts.TempURLKey2` [GH-1864](https://github.com/gophercloud/gophercloud/pull/1864) -* Added `placement/v1/resourceproviders.GetUsages` [GH-1862](https://github.com/gophercloud/gophercloud/pull/1862) -* Added `placement/v1/resourceproviders.GetInventories` [GH-1862](https://github.com/gophercloud/gophercloud/pull/1862) -* Added `imageservice/v2/images.ReplaceImageMinRam` [GH-1867](https://github.com/gophercloud/gophercloud/pull/1867) -* Added `objectstorage/v1/containers.UpdateOpts.TempURLKey` [GH-1865](https://github.com/gophercloud/gophercloud/pull/1865) -* Added `objectstorage/v1/containers.CreateOpts.TempURLKey2` [GH-1865](https://github.com/gophercloud/gophercloud/pull/1865) -* Added `blockstorage/extensions/volumetransfers.List` [GH-1869](https://github.com/gophercloud/gophercloud/pull/1869) -* Added `blockstorage/extensions/volumetransfers.Create` [GH-1869](https://github.com/gophercloud/gophercloud/pull/1869) -* Added `blockstorage/extensions/volumetransfers.Accept` [GH-1869](https://github.com/gophercloud/gophercloud/pull/1869) -* Added `blockstorage/extensions/volumetransfers.Get` [GH-1869](https://github.com/gophercloud/gophercloud/pull/1869) -* Added `blockstorage/extensions/volumetransfers.Delete` [GH-1869](https://github.com/gophercloud/gophercloud/pull/1869) -* Added `blockstorage/extensions/backups.RestoreFromBackup` 
[GH-1871](https://github.com/gophercloud/gophercloud/pull/1871) -* Added `blockstorage/v3/volumes.CreateOpts.BackupID` [GH-1871](https://github.com/gophercloud/gophercloud/pull/1871) -* Added `blockstorage/v3/volumes.Volume.BackupID` [GH-1871](https://github.com/gophercloud/gophercloud/pull/1871) -* Added `identity/v3/projects.ListOpts.Tags` [GH-1882](https://github.com/gophercloud/gophercloud/pull/1882) -* Added `identity/v3/projects.ListOpts.TagsAny` [GH-1882](https://github.com/gophercloud/gophercloud/pull/1882) -* Added `identity/v3/projects.ListOpts.NotTags` [GH-1882](https://github.com/gophercloud/gophercloud/pull/1882) -* Added `identity/v3/projects.ListOpts.NotTagsAny` [GH-1882](https://github.com/gophercloud/gophercloud/pull/1882) -* Added `identity/v3/projects.CreateOpts.Tags` [GH-1882](https://github.com/gophercloud/gophercloud/pull/1882) -* Added `identity/v3/projects.UpdateOpts.Tags` [GH-1882](https://github.com/gophercloud/gophercloud/pull/1882) -* Added `identity/v3/projects.Project.Tags` [GH-1882](https://github.com/gophercloud/gophercloud/pull/1882) -* Changed `compute/v2/servers.CreateOpts.Networks` from `[]Network` to `interface{}` to support creating servers with no networks. [GH-1884](https://github.com/gophercloud/gophercloud/pull/1884) - - -BUG FIXES - -* Added support for `int64` headers, which were previously being silently dropped [GH-1860](https://github.com/gophercloud/gophercloud/pull/1860) -* Allow image properties with empty values [GH-1875](https://github.com/gophercloud/gophercloud/pull/1875) -* Fixed `compute/v2/extensions/extendedserverattributes.ServerAttributesExt.Userdata` JSON tag [GH-1881](https://github.com/gophercloud/gophercloud/pull/1881) - -## 0.8.0 (February 8, 2020) - -UPGRADE NOTES - -* The behavior of `keymanager/v1/acls.SetOpts` has changed. Instead of a struct, it is now `[]SetOpt`. See [GH-1816](https://github.com/gophercloud/gophercloud/pull/1816) for implementation details. - -IMPROVEMENTS - -* The result of `containerinfra/v1/clusters.Resize` now returns only the UUID when calling `Extract`. 
This is a backwards-breaking change from the previous struct that was returned [GH-1649](https://github.com/gophercloud/gophercloud/pull/1649) -* Added `compute/v2/extensions/shelveunshelve.Shelve` [GH-1799](https://github.com/gophercloud/gophercloud/pull/1799) -* Added `compute/v2/extensions/shelveunshelve.ShelveOffload` [GH-1799](https://github.com/gophercloud/gophercloud/pull/1799) -* Added `compute/v2/extensions/shelveunshelve.Unshelve` [GH-1799](https://github.com/gophercloud/gophercloud/pull/1799) -* Added `containerinfra/v1/nodegroups.Get` [GH-1774](https://github.com/gophercloud/gophercloud/pull/1774) -* Added `containerinfra/v1/nodegroups.List` [GH-1774](https://github.com/gophercloud/gophercloud/pull/1774) -* Added `orchestration/v1/resourcetypes.List` [GH-1806](https://github.com/gophercloud/gophercloud/pull/1806) -* Added `orchestration/v1/resourcetypes.GetSchema` [GH-1806](https://github.com/gophercloud/gophercloud/pull/1806) -* Added `orchestration/v1/resourcetypes.GenerateTemplate` [GH-1806](https://github.com/gophercloud/gophercloud/pull/1806) -* Added `keymanager/v1/acls.SetOpt` and changed `keymanager/v1/acls.SetOpts` to `[]SetOpt` [GH-1816](https://github.com/gophercloud/gophercloud/pull/1816) -* Added `blockstorage/apiversions.List` [GH-458](https://github.com/gophercloud/gophercloud/pull/458) -* Added `blockstorage/apiversions.Get` [GH-458](https://github.com/gophercloud/gophercloud/pull/458) -* Added `StatusCodeError` interface and `GetStatusCode` convenience method [GH-1820](https://github.com/gophercloud/gophercloud/pull/1820) -* Added pagination support to `compute/v2/extensions/usage.SingleTenant` [GH-1819](https://github.com/gophercloud/gophercloud/pull/1819) -* Added pagination support to `compute/v2/extensions/usage.AllTenants` [GH-1819](https://github.com/gophercloud/gophercloud/pull/1819) -* Added `placement/v1/resourceproviders.List` [GH-1815](https://github.com/gophercloud/gophercloud/pull/1815) -* Allow `CreateMemberOptsBuilder` to be passed in `loadbalancer/v2/pools.Create` [GH-1822](https://github.com/gophercloud/gophercloud/pull/1822) -* Added `Backup` to `loadbalancer/v2/pools.CreateMemberOpts` [GH-1824](https://github.com/gophercloud/gophercloud/pull/1824) -* Added `MonitorAddress` to `loadbalancer/v2/pools.CreateMemberOpts` [GH-1824](https://github.com/gophercloud/gophercloud/pull/1824) -* Added `MonitorPort` to `loadbalancer/v2/pools.CreateMemberOpts` [GH-1824](https://github.com/gophercloud/gophercloud/pull/1824) -* Changed `Impersonation` to a non-required field in `identity/v3/extensions/trusts.CreateOpts` [GH-1818](https://github.com/gophercloud/gophercloud/pull/1818) -* Added `InsertHeaders` to `loadbalancer/v2/listeners.UpdateOpts` [GH-1835](https://github.com/gophercloud/gophercloud/pull/1835) -* Added `NUMATopology` to `baremetalintrospection/v1/introspection.Data` [GH-1842](https://github.com/gophercloud/gophercloud/pull/1842) -* Added `placement/v1/resourceproviders.Create` [GH-1841](https://github.com/gophercloud/gophercloud/pull/1841) -* Added `blockstorage/extensions/volumeactions.UploadImageOpts.Visibility` [GH-1873](https://github.com/gophercloud/gophercloud/pull/1873) -* Added `blockstorage/extensions/volumeactions.UploadImageOpts.Protected` [GH-1873](https://github.com/gophercloud/gophercloud/pull/1873) -* Added `blockstorage/extensions/volumeactions.VolumeImage.Visibility` [GH-1873](https://github.com/gophercloud/gophercloud/pull/1873) -* Added `blockstorage/extensions/volumeactions.VolumeImage.Protected` 
[GH-1873](https://github.com/gophercloud/gophercloud/pull/1873) - -BUG FIXES - -* Changed `sort_key` to `sort_keys` in ` workflow/v2/crontriggers.ListOpts` [GH-1809](https://github.com/gophercloud/gophercloud/pull/1809) -* Allow `blockstorage/extensions/schedulerstats.Capabilities.MaxOverSubscriptionRatio` to accept both string and int/float responses [GH-1817](https://github.com/gophercloud/gophercloud/pull/1817) -* Fixed bug in `NewLoadBalancerV2` for situations when the LBaaS service was advertised without a `/v2.0` endpoint [GH-1829](https://github.com/gophercloud/gophercloud/pull/1829) -* Fixed JSON tags in `baremetal/v1/ports.UpdateOperation` [GH-1840](https://github.com/gophercloud/gophercloud/pull/1840) -* Fixed JSON tags in `networking/v2/extensions/lbaas/vips.commonResult.Extract()` [GH-1840](https://github.com/gophercloud/gophercloud/pull/1840) - -## 0.7.0 (December 3, 2019) - -IMPROVEMENTS - -* Allow a token to be used directly for authentication instead of generating a new token based on a given token [GH-1752](https://github.com/gophercloud/gophercloud/pull/1752) -* Moved `tags.ServerTagsExt` to servers.TagsExt` [GH-1760](https://github.com/gophercloud/gophercloud/pull/1760) -* Added `tags`, `tags-any`, `not-tags`, and `not-tags-any` to `compute/v2/servers.ListOpts` [GH-1759](https://github.com/gophercloud/gophercloud/pull/1759) -* Added `AccessRule` to `identity/v3/applicationcredentials` [GH-1758](https://github.com/gophercloud/gophercloud/pull/1758) -* Gophercloud no longer returns an error when multiple endpoints are found. Instead, it will choose the first endpoint and discard the others [GH-1766](https://github.com/gophercloud/gophercloud/pull/1766) -* Added `networking/v2/extensions/fwaas_v2/rules.Create` [GH-1768](https://github.com/gophercloud/gophercloud/pull/1768) -* Added `networking/v2/extensions/fwaas_v2/rules.Delete` [GH-1771](https://github.com/gophercloud/gophercloud/pull/1771) -* Added `loadbalancer/v2/providers.List` [GH-1765](https://github.com/gophercloud/gophercloud/pull/1765) -* Added `networking/v2/extensions/fwaas_v2/rules.Get` [GH-1772](https://github.com/gophercloud/gophercloud/pull/1772) -* Added `networking/v2/extensions/fwaas_v2/rules.Update` [GH-1776](https://github.com/gophercloud/gophercloud/pull/1776) -* Added `networking/v2/extensions/fwaas_v2/rules.List` [GH-1783](https://github.com/gophercloud/gophercloud/pull/1783) -* Added `MaxRetriesDown` into `loadbalancer/v2/monitors.CreateOpts` [GH-1785](https://github.com/gophercloud/gophercloud/pull/1785) -* Added `MaxRetriesDown` into `loadbalancer/v2/monitors.UpdateOpts` [GH-1786](https://github.com/gophercloud/gophercloud/pull/1786) -* Added `MaxRetriesDown` into `loadbalancer/v2/monitors.Monitor` [GH-1787](https://github.com/gophercloud/gophercloud/pull/1787) -* Added `MaxRetriesDown` into `loadbalancer/v2/monitors.ListOpts` [GH-1788](https://github.com/gophercloud/gophercloud/pull/1788) -* Updated `go.mod` dependencies, specifically to account for CVE-2019-11840 with `golang.org/x/crypto` [GH-1793](https://github.com/gophercloud/gophercloud/pull/1788) - -## 0.6.0 (October 17, 2019) - -UPGRADE NOTES - -* The way reauthentication works has been refactored. This should not cause a problem, but please report bugs if it does. See [GH-1746](https://github.com/gophercloud/gophercloud/pull/1746) for more information. 
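For readers following the 0.6.0 reauthentication notes above: reauthentication is opt-in via `AuthOptions.AllowReauth`, documented in the deleted `auth_options.go` later in this patch. A minimal sketch of the calling pattern the refactored path serves, assuming the `openstack.AuthenticatedClient` entry point shown in the deleted README:

```go
package example

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"
)

func newProvider() (*gophercloud.ProviderClient, error) {
	opts := gophercloud.AuthOptions{
		IdentityEndpoint: "https://openstack.example.com:5000/v3",
		Username:         "demo",
		Password:         "secret",
		DomainName:       "Default",
		// AllowReauth caches the credentials so gophercloud can
		// re-authenticate when the token expires; GH-1746 reworked this
		// path with goroutines plus a guard against infinite retries.
		AllowReauth: true,
	}
	return openstack.AuthenticatedClient(opts)
}
```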
- -IMPROVEMENTS - -* Added `networking/v2/extensions/quotas.Get` [GH-1742](https://github.com/gophercloud/gophercloud/pull/1742) -* Added `networking/v2/extensions/quotas.Update` [GH-1747](https://github.com/gophercloud/gophercloud/pull/1747) -* Refactored the reauthentication implementation to use goroutines and added a check to prevent an infinite loop in certain situations. [GH-1746](https://github.com/gophercloud/gophercloud/pull/1746) - -BUG FIXES - -* Changed `Flavor` to `FlavorID` in `loadbalancer/v2/loadbalancers` [GH-1744](https://github.com/gophercloud/gophercloud/pull/1744) -* Changed `Flavor` to `FlavorID` in `networking/v2/extensions/lbaas_v2/loadbalancers` [GH-1744](https://github.com/gophercloud/gophercloud/pull/1744) -* The `go-yaml` dependency was updated to `v2.2.4` to fix possible DDOS vulnerabilities [GH-1751](https://github.com/gophercloud/gophercloud/pull/1751) - -## 0.5.0 (October 13, 2019) - -IMPROVEMENTS - -* Added `VolumeType` to `compute/v2/extensions/bootfromvolume.BlockDevice`[GH-1690](https://github.com/gophercloud/gophercloud/pull/1690) -* Added `networking/v2/extensions/layer3/portforwarding.List` [GH-1688](https://github.com/gophercloud/gophercloud/pull/1688) -* Added `networking/v2/extensions/layer3/portforwarding.Get` [GH-1698](https://github.com/gophercloud/gophercloud/pull/1696) -* Added `compute/v2/extensions/tags.ReplaceAll` [GH-1696](https://github.com/gophercloud/gophercloud/pull/1696) -* Added `compute/v2/extensions/tags.Add` [GH-1696](https://github.com/gophercloud/gophercloud/pull/1696) -* Added `networking/v2/extensions/layer3/portforwarding.Update` [GH-1703](https://github.com/gophercloud/gophercloud/pull/1703) -* Added `ExtractDomain` method to token results in `identity/v3/tokens` [GH-1712](https://github.com/gophercloud/gophercloud/pull/1712) -* Added `AllowedCIDRs` to `loadbalancer/v2/listeners.CreateOpts` [GH-1710](https://github.com/gophercloud/gophercloud/pull/1710) -* Added `AllowedCIDRs` to `loadbalancer/v2/listeners.UpdateOpts` [GH-1710](https://github.com/gophercloud/gophercloud/pull/1710) -* Added `AllowedCIDRs` to `loadbalancer/v2/listeners.Listener` [GH-1710](https://github.com/gophercloud/gophercloud/pull/1710) -* Added `compute/v2/extensions/tags.Add` [GH-1695](https://github.com/gophercloud/gophercloud/pull/1695) -* Added `compute/v2/extensions/tags.ReplaceAll` [GH-1694](https://github.com/gophercloud/gophercloud/pull/1694) -* Added `compute/v2/extensions/tags.Delete` [GH-1699](https://github.com/gophercloud/gophercloud/pull/1699) -* Added `compute/v2/extensions/tags.DeleteAll` [GH-1700](https://github.com/gophercloud/gophercloud/pull/1700) -* Added `ImageStatusImporting` as an image status [GH-1725](https://github.com/gophercloud/gophercloud/pull/1725) -* Added `ByPath` to `baremetalintrospection/v1/introspection.RootDiskType` [GH-1730](https://github.com/gophercloud/gophercloud/pull/1730) -* Added `AttachedVolumes` to `compute/v2/servers.Server` [GH-1732](https://github.com/gophercloud/gophercloud/pull/1732) -* Enable unmarshaling server tags to a `compute/v2/servers.Server` struct [GH-1734] -* Allow setting an empty members list in `loadbalancer/v2/pools.BatchUpdateMembers` [GH-1736](https://github.com/gophercloud/gophercloud/pull/1736) -* Allow unsetting members' subnet ID and name in `loadbalancer/v2/pools.BatchUpdateMemberOpts` [GH-1738](https://github.com/gophercloud/gophercloud/pull/1738) - -BUG FIXES - -* Changed struct type for options in `networking/v2/extensions/lbaas_v2/listeners` to `UpdateOptsBuilder` interface 
instead of specific UpdateOpts type [GH-1705](https://github.com/gophercloud/gophercloud/pull/1705) -* Changed struct type for options in `networking/v2/extensions/lbaas_v2/loadbalancers` to `UpdateOptsBuilder` interface instead of specific UpdateOpts type [GH-1706](https://github.com/gophercloud/gophercloud/pull/1706) -* Fixed issue with `blockstorage/v1/volumes.Create` where the response was expected to be 202 [GH-1720](https://github.com/gophercloud/gophercloud/pull/1720) -* Changed `DefaultTlsContainerRef` from `string` to `*string` in `loadbalancer/v2/listeners.UpdateOpts` to allow the value to be removed during update. [GH-1723](https://github.com/gophercloud/gophercloud/pull/1723) -* Changed `SniContainerRefs` from `[]string{}` to `*[]string{}` in `loadbalancer/v2/listeners.UpdateOpts` to allow the value to be removed during update. [GH-1723](https://github.com/gophercloud/gophercloud/pull/1723) -* Changed `DefaultTlsContainerRef` from `string` to `*string` in `networking/v2/extensions/lbaas_v2/listeners.UpdateOpts` to allow the value to be removed during update. [GH-1723](https://github.com/gophercloud/gophercloud/pull/1723) -* Changed `SniContainerRefs` from `[]string{}` to `*[]string{}` in `networking/v2/extensions/lbaas_v2/listeners.UpdateOpts` to allow the value to be removed during update. [GH-1723](https://github.com/gophercloud/gophercloud/pull/1723) - - -## 0.4.0 (September 3, 2019) - -IMPROVEMENTS - -* Added `blockstorage/extensions/quotasets.results.QuotaSet.Groups` [GH-1668](https://github.com/gophercloud/gophercloud/pull/1668) -* Added `blockstorage/extensions/quotasets.results.QuotaUsageSet.Groups` [GH-1668](https://github.com/gophercloud/gophercloud/pull/1668) -* Added `containerinfra/v1/clusters.CreateOpts.FixedNetwork` [GH-1674](https://github.com/gophercloud/gophercloud/pull/1674) -* Added `containerinfra/v1/clusters.CreateOpts.FixedSubnet` [GH-1676](https://github.com/gophercloud/gophercloud/pull/1676) -* Added `containerinfra/v1/clusters.CreateOpts.FloatingIPEnabled` [GH-1677](https://github.com/gophercloud/gophercloud/pull/1677) -* Added `CreatedAt` and `UpdatedAt` to `loadbalancers/v2/loadbalancers.LoadBalancer` [GH-1681](https://github.com/gophercloud/gophercloud/pull/1681) -* Added `networking/v2/extensions/layer3/portforwarding.Create` [GH-1651](https://github.com/gophercloud/gophercloud/pull/1651) -* Added `networking/v2/extensions/agents.ListDHCPNetworks` [GH-1686](https://github.com/gophercloud/gophercloud/pull/1686) -* Added `networking/v2/extensions/layer3/portforwarding.Delete` [GH-1652](https://github.com/gophercloud/gophercloud/pull/1652) -* Added `compute/v2/extensions/tags.List` [GH-1679](https://github.com/gophercloud/gophercloud/pull/1679) -* Added `compute/v2/extensions/tags.Check` [GH-1679](https://github.com/gophercloud/gophercloud/pull/1679) - -BUG FIXES - -* Changed `identity/v3/endpoints.ListOpts.RegionID` from `int` to `string` [GH-1664](https://github.com/gophercloud/gophercloud/pull/1664) -* Fixed issue where older time formats in some networking APIs/resources were unable to be parsed [GH-1671](https://github.com/gophercloud/gophercloud/pull/1664) -* Changed `SATA`, `SCSI`, and `SAS` types to `InterfaceType` in `baremetal/v1/nodes` [GH-1683] - -## 0.3.0 (July 31, 2019) - -IMPROVEMENTS - -* Added `baremetal/apiversions.List` [GH-1577](https://github.com/gophercloud/gophercloud/pull/1577) -* Added `baremetal/apiversions.Get` [GH-1577](https://github.com/gophercloud/gophercloud/pull/1577) -* Added 
`compute/v2/extensions/servergroups.CreateOpts.Policy` [GH-1636](https://github.com/gophercloud/gophercloud/pull/1636) -* Added `identity/v3/extensions/trusts.Create` [GH-1644](https://github.com/gophercloud/gophercloud/pull/1644) -* Added `identity/v3/extensions/trusts.Delete` [GH-1644](https://github.com/gophercloud/gophercloud/pull/1644) -* Added `CreatedAt` and `UpdatedAt` to `networking/v2/extensions/layer3/floatingips.FloatingIP` [GH-1647](https://github.com/gophercloud/gophercloud/issues/1646) -* Added `CreatedAt` and `UpdatedAt` to `networking/v2/extensions/security/groups.SecGroup` [GH-1654](https://github.com/gophercloud/gophercloud/issues/1654) -* Added `CreatedAt` and `UpdatedAt` to `networking/v2/networks.Network` [GH-1657](https://github.com/gophercloud/gophercloud/issues/1657) -* Added `keymanager/v1/containers.CreateSecretRef` [GH-1659](https://github.com/gophercloud/gophercloud/issues/1659) -* Added `keymanager/v1/containers.DeleteSecretRef` [GH-1659](https://github.com/gophercloud/gophercloud/issues/1659) -* Added `sharedfilesystems/v2/shares.GetMetadata` [GH-1656](https://github.com/gophercloud/gophercloud/issues/1656) -* Added `sharedfilesystems/v2/shares.GetMetadatum` [GH-1656](https://github.com/gophercloud/gophercloud/issues/1656) -* Added `sharedfilesystems/v2/shares.SetMetadata` [GH-1656](https://github.com/gophercloud/gophercloud/issues/1656) -* Added `sharedfilesystems/v2/shares.UpdateMetadata` [GH-1656](https://github.com/gophercloud/gophercloud/issues/1656) -* Added `sharedfilesystems/v2/shares.DeleteMetadatum` [GH-1656](https://github.com/gophercloud/gophercloud/issues/1656) -* Added `sharedfilesystems/v2/sharetypes.IDFromName` [GH-1662](https://github.com/gophercloud/gophercloud/issues/1662) - - - -BUG FIXES - -* Changed `baremetal/v1/nodes.CleanStep.Args` from `map[string]string` to `map[string]interface{}` [GH-1638](https://github.com/gophercloud/gophercloud/pull/1638) -* Removed `URLPath` and `ExpectedCodes` from `loadbalancer/v2/monitors.ToMonitorCreateMap` since Octavia now provides default values when these fields are not specified [GH-1640](https://github.com/gophercloud/gophercloud/pull/1540) - - -## 0.2.0 (June 17, 2019) - -IMPROVEMENTS - -* Added `networking/v2/extensions/qos/rules.ListBandwidthLimitRules` [GH-1584](https://github.com/gophercloud/gophercloud/pull/1584) -* Added `networking/v2/extensions/qos/rules.GetBandwidthLimitRule` [GH-1584](https://github.com/gophercloud/gophercloud/pull/1584) -* Added `networking/v2/extensions/qos/rules.CreateBandwidthLimitRule` [GH-1584](https://github.com/gophercloud/gophercloud/pull/1584) -* Added `networking/v2/extensions/qos/rules.UpdateBandwidthLimitRule` [GH-1589](https://github.com/gophercloud/gophercloud/pull/1589) -* Added `networking/v2/extensions/qos/rules.DeleteBandwidthLimitRule` [GH-1590](https://github.com/gophercloud/gophercloud/pull/1590) -* Added `networking/v2/extensions/qos/policies.List` [GH-1591](https://github.com/gophercloud/gophercloud/pull/1591) -* Added `networking/v2/extensions/qos/policies.Get` [GH-1593](https://github.com/gophercloud/gophercloud/pull/1593) -* Added `networking/v2/extensions/qos/rules.ListDSCPMarkingRules` [GH-1594](https://github.com/gophercloud/gophercloud/pull/1594) -* Added `networking/v2/extensions/qos/policies.Create` [GH-1595](https://github.com/gophercloud/gophercloud/pull/1595) -* Added `compute/v2/extensions/diagnostics.Get` [GH-1592](https://github.com/gophercloud/gophercloud/pull/1592) -* Added `networking/v2/extensions/qos/policies.Update` 
[GH-1603](https://github.com/gophercloud/gophercloud/pull/1603) -* Added `networking/v2/extensions/qos/policies.Delete` [GH-1603](https://github.com/gophercloud/gophercloud/pull/1603) -* Added `networking/v2/extensions/qos/rules.CreateDSCPMarkingRule` [GH-1605](https://github.com/gophercloud/gophercloud/pull/1605) -* Added `networking/v2/extensions/qos/rules.UpdateDSCPMarkingRule` [GH-1605](https://github.com/gophercloud/gophercloud/pull/1605) -* Added `networking/v2/extensions/qos/rules.GetDSCPMarkingRule` [GH-1609](https://github.com/gophercloud/gophercloud/pull/1609) -* Added `networking/v2/extensions/qos/rules.DeleteDSCPMarkingRule` [GH-1609](https://github.com/gophercloud/gophercloud/pull/1609) -* Added `networking/v2/extensions/qos/rules.ListMinimumBandwidthRules` [GH-1615](https://github.com/gophercloud/gophercloud/pull/1615) -* Added `networking/v2/extensions/qos/rules.GetMinimumBandwidthRule` [GH-1615](https://github.com/gophercloud/gophercloud/pull/1615) -* Added `networking/v2/extensions/qos/rules.CreateMinimumBandwidthRule` [GH-1615](https://github.com/gophercloud/gophercloud/pull/1615) -* Added `Hostname` to `baremetalintrospection/v1/introspection.Data` [GH-1627](https://github.com/gophercloud/gophercloud/pull/1627) -* Added `networking/v2/extensions/qos/rules.UpdateMinimumBandwidthRule` [GH-1624](https://github.com/gophercloud/gophercloud/pull/1624) -* Added `networking/v2/extensions/qos/rules.DeleteMinimumBandwidthRule` [GH-1624](https://github.com/gophercloud/gophercloud/pull/1624) -* Added `networking/v2/extensions/qos/ruletypes.GetRuleType` [GH-1625](https://github.com/gophercloud/gophercloud/pull/1625) -* Added `Extra` to `baremetalintrospection/v1/introspection.Data` [GH-1611](https://github.com/gophercloud/gophercloud/pull/1611) -* Added `blockstorage/extensions/volumeactions.SetImageMetadata` [GH-1621](https://github.com/gophercloud/gophercloud/pull/1621) - -BUG FIXES - -* Updated `networking/v2/extensions/qos/rules.UpdateBandwidthLimitRule` to use return code 200 [GH-1606](https://github.com/gophercloud/gophercloud/pull/1606) -* Fixed bug in `compute/v2/extensions/schedulerhints.SchedulerHints.Query` where contents will now be marshalled to a string [GH-1620](https://github.com/gophercloud/gophercloud/pull/1620) - -## 0.1.0 (May 27, 2019) - -Initial tagged release. diff --git a/vendor/github.com/gophercloud/gophercloud/LICENSE b/vendor/github.com/gophercloud/gophercloud/LICENSE deleted file mode 100644 index fbbbc9e4cba..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Copyright 2012-2013 Rackspace, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use -this file except in compliance with the License. You may obtain a copy of the -License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed -under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. - ------- - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS diff --git a/vendor/github.com/gophercloud/gophercloud/README.md b/vendor/github.com/gophercloud/gophercloud/README.md deleted file mode 100644 index 95539563ace..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/README.md +++ /dev/null @@ -1,166 +0,0 @@ -# Gophercloud: an OpenStack SDK for Go -[![Build Status](https://travis-ci.org/gophercloud/gophercloud.svg?branch=master)](https://travis-ci.org/gophercloud/gophercloud) -[![Coverage Status](https://coveralls.io/repos/github/gophercloud/gophercloud/badge.svg?branch=master)](https://coveralls.io/github/gophercloud/gophercloud?branch=master) - -Gophercloud is an OpenStack Go SDK. 
- -## Useful links - -* [Reference documentation](http://godoc.org/github.com/gophercloud/gophercloud) -* [Effective Go](https://golang.org/doc/effective_go.html) - -## How to install - -Before installing, you need to ensure that your [GOPATH environment variable](https://golang.org/doc/code.html#GOPATH) -is pointing to an appropriate directory where you want to install Gophercloud: - -```bash -mkdir $HOME/go -export GOPATH=$HOME/go -``` - -To protect yourself against changes in your dependencies, we highly recommend choosing a -[dependency management solution](https://github.com/golang/go/wiki/PackageManagementTools) for -your projects, such as [godep](https://github.com/tools/godep). Once this is set up, you can install -Gophercloud as a dependency like so: - -```bash -go get github.com/gophercloud/gophercloud - -# Edit your code to import relevant packages from "github.com/gophercloud/gophercloud" - -godep save ./... -``` - -This will install all the source files you need into a `Godeps/_workspace` directory, which is -referenceable from your own source files when you use the `godep go` command. - -## Getting started - -### Credentials - -Because you'll be hitting an API, you will need to retrieve your OpenStack -credentials and either store them as environment variables or in your local Go -files. The first method is recommended because it decouples credential -information from source code, allowing you to push the latter to your version -control system without any security risk. - -You will need to retrieve the following: - -* username -* password -* a valid Keystone identity URL - -For users that have the OpenStack dashboard installed, there's a shortcut. If -you visit the `project/access_and_security` path in Horizon and click on the -"Download OpenStack RC File" button at the top right hand corner, you will -download a bash file that exports all of your access details to environment -variables. To execute the file, run `source admin-openrc.sh` and you will be -prompted for your password. - -### Authentication - -> NOTE: It is now recommended to use the `clientconfig` package found at -> https://github.com/gophercloud/utils/tree/master/openstack/clientconfig -> for all authentication purposes. -> -> The below documentation is still relevant. clientconfig simply implements -> the below and presents it in an easier and more flexible way. - -Once you have access to your credentials, you can begin plugging them into -Gophercloud. The next step is authentication, and this is handled by a base -"Provider" struct. To get one, you can either pass in your credentials -explicitly, or tell Gophercloud to use environment variables: - -```go -import ( - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" - "github.com/gophercloud/gophercloud/openstack/utils" -) - -// Option 1: Pass in the values yourself -opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - Username: "{username}", - Password: "{password}", -} - -// Option 2: Use a utility function to retrieve all your environment variables -opts, err := openstack.AuthOptionsFromEnv() -``` - -Once you have the `opts` variable, you can pass it in and get back a -`ProviderClient` struct: - -```go -provider, err := openstack.AuthenticatedClient(opts) -``` - -The `ProviderClient` is the top-level client that all of your OpenStack services -derive from. 
The provider contains all of the authentication details that allow -your Go code to access the API - such as the base URL and token ID. - -### Provision a server - -Once we have a base Provider, we inject it as a dependency into each OpenStack -service. In order to work with the Compute API, we need a Compute service -client; which can be created like so: - -```go -client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{ - Region: os.Getenv("OS_REGION_NAME"), -}) -``` - -We then use this `client` for any Compute API operation we want. In our case, -we want to provision a new server - so we invoke the `Create` method and pass -in the flavor ID (hardware specification) and image ID (operating system) we're -interested in: - -```go -import "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" - -server, err := servers.Create(client, servers.CreateOpts{ - Name: "My new server!", - FlavorRef: "flavor_id", - ImageRef: "image_id", -}).Extract() -``` - -The above code sample creates a new server with the parameters, and embodies the -new resource in the `server` variable (a -[`servers.Server`](http://godoc.org/github.com/gophercloud/gophercloud) struct). - -## Advanced Usage - -Have a look at the [FAQ](./docs/FAQ.md) for some tips on customizing the way Gophercloud works. - -## Backwards-Compatibility Guarantees - -None. Vendor it and write tests covering the parts you use. - -## Contributing - -See the [contributing guide](./.github/CONTRIBUTING.md). - -## Help and feedback - -If you're struggling with something or have spotted a potential bug, feel free -to submit an issue to our [bug tracker](https://github.com/gophercloud/gophercloud/issues). - -## Thank You - -We'd like to extend special thanks and appreciation to the following: - -### OpenLab - - - -OpenLab is providing a full CI environment to test each PR and merge for a variety of OpenStack releases. - -### VEXXHOST - - - -VEXXHOST is providing their services to assist with the development and testing of Gophercloud. diff --git a/vendor/github.com/gophercloud/gophercloud/auth_options.go b/vendor/github.com/gophercloud/gophercloud/auth_options.go deleted file mode 100644 index 4f301305e63..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/auth_options.go +++ /dev/null @@ -1,514 +0,0 @@ -package gophercloud - -/* -AuthOptions stores information needed to authenticate to an OpenStack Cloud. -You can populate one manually, or use a provider's AuthOptionsFromEnv() function -to read relevant information from the standard environment variables. Pass one -to a provider's AuthenticatedClient function to authenticate and obtain a -ProviderClient representing an active session on that provider. - -Its fields are the union of those recognized by each identity implementation and -provider. - -An example of manually providing authentication information: - - opts := gophercloud.AuthOptions{ - IdentityEndpoint: "https://openstack.example.com:5000/v2.0", - Username: "{username}", - Password: "{password}", - TenantID: "{tenant_id}", - } - - provider, err := openstack.AuthenticatedClient(opts) - -An example of using AuthOptionsFromEnv(), where the environment variables can -be read from a file, such as a standard openrc file: - - opts, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(opts) -*/ -type AuthOptions struct { - // IdentityEndpoint specifies the HTTP endpoint that is required to work with - // the Identity API of the appropriate version. 
While it's ultimately needed by
-	// all of the identity services, it will often be populated by a provider-level
-	// function.
-	//
-	// The IdentityEndpoint is typically referred to as the "auth_url" or
-	// "OS_AUTH_URL" in the information provided by the cloud operator.
-	IdentityEndpoint string `json:"-"`
-
-	// Username is required if using Identity V2 API. Consult with your provider's
-	// control panel to discover your account's username. In Identity V3, either
-	// UserID or a combination of Username and DomainID or DomainName is needed.
-	Username string `json:"username,omitempty"`
-	UserID   string `json:"-"`
-
-	Password string `json:"password,omitempty"`
-
-	// Passcode is used in the TOTP authentication method.
-	Passcode string `json:"passcode,omitempty"`
-
-	// At most one of DomainID and DomainName must be provided if using Username
-	// with Identity V3. Otherwise, both are optional.
-	DomainID   string `json:"-"`
-	DomainName string `json:"name,omitempty"`
-
-	// The TenantID and TenantName fields are optional for the Identity V2 API.
-	// The same fields are known as project_id and project_name in the Identity
-	// V3 API, but are collected as TenantID and TenantName here in both cases.
-	// Some providers allow you to specify a TenantName instead of the TenantID.
-	// Some require both. Your provider's authentication policies will determine
-	// how these fields influence authentication.
-	// If DomainID or DomainName are provided, they will also apply to TenantName.
-	// It is not currently possible to authenticate with Username and a Domain
-	// and scope to a Project in a different Domain by using TenantName. To
-	// accomplish that, the ProjectID will need to be provided as the TenantID
-	// option.
-	TenantID   string `json:"tenantId,omitempty"`
-	TenantName string `json:"tenantName,omitempty"`
-
-	// AllowReauth should be set to true if you grant permission for Gophercloud to
-	// cache your credentials in memory, and to allow Gophercloud to attempt to
-	// re-authenticate automatically if/when your token expires. If you set it to
-	// false, it will not cache these settings, and re-authentication will not be
-	// possible. This setting defaults to false.
-	//
-	// NOTE: The reauth function will try to re-authenticate endlessly if left
-	// unchecked. The way to limit the number of attempts is to provide a custom
-	// HTTP client to the provider client and provide a transport that implements
-	// the RoundTripper interface and stores the number of failed retries. For an
-	// example of this, see here:
-	// https://github.com/rackspace/rack/blob/1.0.0/auth/clients.go#L311
-	AllowReauth bool `json:"-"`
-
-	// TokenID allows users to authenticate (possibly as another user) with an
-	// authentication token ID.
-	TokenID string `json:"-"`
-
-	// Scope determines the scoping of the authentication request.
-	Scope *AuthScope `json:"-"`
-
-	// Authentication through Application Credentials requires supplying a name,
-	// a project, and a secret. The project can be supplied via the TenantID field.
-	ApplicationCredentialID     string `json:"-"`
-	ApplicationCredentialName   string `json:"-"`
-	ApplicationCredentialSecret string `json:"-"`
-}
-
-// AuthScope allows a created token to be limited to a specific domain or project.
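-//
-// A minimal sketch (placeholder values, assuming the fields documented above):
-// a token scoped to a named project in a named domain could be requested with:
-//
-//	opts := gophercloud.AuthOptions{
-//		IdentityEndpoint: "https://openstack.example.com:5000/v3",
-//		Username:         "{username}",
-//		Password:         "{password}",
-//		DomainName:       "{domain_name}",
-//		Scope: &gophercloud.AuthScope{
-//			ProjectName: "{project_name}",
-//			DomainName:  "{domain_name}",
-//		},
-//	}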
-type AuthScope struct { - ProjectID string - ProjectName string - DomainID string - DomainName string - System bool -} - -// ToTokenV2CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder -// interface in the v2 tokens package -func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { - // Populate the request map. - authMap := make(map[string]interface{}) - - if opts.Username != "" { - if opts.Password != "" { - authMap["passwordCredentials"] = map[string]interface{}{ - "username": opts.Username, - "password": opts.Password, - } - } else { - return nil, ErrMissingInput{Argument: "Password"} - } - } else if opts.TokenID != "" { - authMap["token"] = map[string]interface{}{ - "id": opts.TokenID, - } - } else { - return nil, ErrMissingInput{Argument: "Username"} - } - - if opts.TenantID != "" { - authMap["tenantId"] = opts.TenantID - } - if opts.TenantName != "" { - authMap["tenantName"] = opts.TenantName - } - - return map[string]interface{}{"auth": authMap}, nil -} - -// ToTokenV3CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder -// interface in the v3 tokens package -func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { - type domainReq struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - } - - type projectReq struct { - Domain *domainReq `json:"domain,omitempty"` - Name *string `json:"name,omitempty"` - ID *string `json:"id,omitempty"` - } - - type userReq struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - Password *string `json:"password,omitempty"` - Passcode *string `json:"passcode,omitempty"` - Domain *domainReq `json:"domain,omitempty"` - } - - type passwordReq struct { - User userReq `json:"user"` - } - - type tokenReq struct { - ID string `json:"id"` - } - - type applicationCredentialReq struct { - ID *string `json:"id,omitempty"` - Name *string `json:"name,omitempty"` - User *userReq `json:"user,omitempty"` - Secret *string `json:"secret,omitempty"` - } - - type totpReq struct { - User *userReq `json:"user,omitempty"` - } - - type identityReq struct { - Methods []string `json:"methods"` - Password *passwordReq `json:"password,omitempty"` - Token *tokenReq `json:"token,omitempty"` - ApplicationCredential *applicationCredentialReq `json:"application_credential,omitempty"` - TOTP *totpReq `json:"totp,omitempty"` - } - - type authReq struct { - Identity identityReq `json:"identity"` - } - - type request struct { - Auth authReq `json:"auth"` - } - - // Populate the request structure based on the provided arguments. Create and return an error - // if insufficient or incompatible information is present. - var req request - - if opts.Password == "" && opts.Passcode == "" { - if opts.TokenID != "" { - // Because we aren't using password authentication, it's an error to also provide any of the user-based authentication - // parameters. - if opts.Username != "" { - return nil, ErrUsernameWithToken{} - } - if opts.UserID != "" { - return nil, ErrUserIDWithToken{} - } - if opts.DomainID != "" { - return nil, ErrDomainIDWithToken{} - } - if opts.DomainName != "" { - return nil, ErrDomainNameWithToken{} - } - - // Configure the request for Token authentication. - req.Auth.Identity.Methods = []string{"token"} - req.Auth.Identity.Token = &tokenReq{ - ID: opts.TokenID, - } - - } else if opts.ApplicationCredentialID != "" { - // Configure the request for ApplicationCredentialID authentication. 
- // https://github.com/openstack/keystoneauth/blob/stable/rocky/keystoneauth1/identity/v3/application_credential.py#L48-L67 - // There are three kinds of possible application_credential requests - // 1. application_credential id + secret - // 2. application_credential name + secret + user_id - // 3. application_credential name + secret + username + domain_id / domain_name - if opts.ApplicationCredentialSecret == "" { - return nil, ErrAppCredMissingSecret{} - } - req.Auth.Identity.Methods = []string{"application_credential"} - req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{ - ID: &opts.ApplicationCredentialID, - Secret: &opts.ApplicationCredentialSecret, - } - } else if opts.ApplicationCredentialName != "" { - if opts.ApplicationCredentialSecret == "" { - return nil, ErrAppCredMissingSecret{} - } - - var userRequest *userReq - - if opts.UserID != "" { - // UserID could be used without the domain information - userRequest = &userReq{ - ID: &opts.UserID, - } - } - - if userRequest == nil && opts.Username == "" { - // Make sure that Username or UserID are provided - return nil, ErrUsernameOrUserID{} - } - - if userRequest == nil && opts.DomainID != "" { - userRequest = &userReq{ - Name: &opts.Username, - Domain: &domainReq{ID: &opts.DomainID}, - } - } - - if userRequest == nil && opts.DomainName != "" { - userRequest = &userReq{ - Name: &opts.Username, - Domain: &domainReq{Name: &opts.DomainName}, - } - } - - // Make sure that DomainID or DomainName are provided among Username - if userRequest == nil { - return nil, ErrDomainIDOrDomainName{} - } - - req.Auth.Identity.Methods = []string{"application_credential"} - req.Auth.Identity.ApplicationCredential = &applicationCredentialReq{ - Name: &opts.ApplicationCredentialName, - User: userRequest, - Secret: &opts.ApplicationCredentialSecret, - } - } else { - // If no password or token ID or ApplicationCredential are available, authentication can't continue. - return nil, ErrMissingPassword{} - } - } else { - // Password authentication. - if opts.Password != "" { - req.Auth.Identity.Methods = append(req.Auth.Identity.Methods, "password") - } - - // TOTP authentication. - if opts.Passcode != "" { - req.Auth.Identity.Methods = append(req.Auth.Identity.Methods, "totp") - } - - // At least one of Username and UserID must be specified. - if opts.Username == "" && opts.UserID == "" { - return nil, ErrUsernameOrUserID{} - } - - if opts.Username != "" { - // If Username is provided, UserID may not be provided. - if opts.UserID != "" { - return nil, ErrUsernameOrUserID{} - } - - // Either DomainID or DomainName must also be specified. - if opts.DomainID == "" && opts.DomainName == "" { - return nil, ErrDomainIDOrDomainName{} - } - - if opts.DomainID != "" { - if opts.DomainName != "" { - return nil, ErrDomainIDOrDomainName{} - } - - // Configure the request for Username and Password authentication with a DomainID. - if opts.Password != "" { - req.Auth.Identity.Password = &passwordReq{ - User: userReq{ - Name: &opts.Username, - Password: &opts.Password, - Domain: &domainReq{ID: &opts.DomainID}, - }, - } - } - if opts.Passcode != "" { - req.Auth.Identity.TOTP = &totpReq{ - User: &userReq{ - Name: &opts.Username, - Passcode: &opts.Passcode, - Domain: &domainReq{ID: &opts.DomainID}, - }, - } - } - } - - if opts.DomainName != "" { - // Configure the request for Username and Password authentication with a DomainName. 
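-			//
-			// As a sketch, the identity section produced here for the password
-			// method looks like:
-			//
-			//	"identity": {
-			//		"methods": ["password"],
-			//		"password": {"user": {"name": "...", "password": "...",
-			//			"domain": {"name": "..."}}}
-			//	}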
- if opts.Password != "" { - req.Auth.Identity.Password = &passwordReq{ - User: userReq{ - Name: &opts.Username, - Password: &opts.Password, - Domain: &domainReq{Name: &opts.DomainName}, - }, - } - } - - if opts.Passcode != "" { - req.Auth.Identity.TOTP = &totpReq{ - User: &userReq{ - Name: &opts.Username, - Passcode: &opts.Passcode, - Domain: &domainReq{Name: &opts.DomainName}, - }, - } - } - } - } - - if opts.UserID != "" { - // If UserID is specified, neither DomainID nor DomainName may be. - if opts.DomainID != "" { - return nil, ErrDomainIDWithUserID{} - } - if opts.DomainName != "" { - return nil, ErrDomainNameWithUserID{} - } - - // Configure the request for UserID and Password authentication. - if opts.Password != "" { - req.Auth.Identity.Password = &passwordReq{ - User: userReq{ - ID: &opts.UserID, - Password: &opts.Password, - }, - } - } - - if opts.Passcode != "" { - req.Auth.Identity.TOTP = &totpReq{ - User: &userReq{ - ID: &opts.UserID, - Passcode: &opts.Passcode, - }, - } - } - } - } - - b, err := BuildRequestBody(req, "") - if err != nil { - return nil, err - } - - if len(scope) != 0 { - b["auth"].(map[string]interface{})["scope"] = scope - } - - return b, nil -} - -// ToTokenV3ScopeMap builds a scope from AuthOptions and satisfies interface in -// the v3 tokens package. -func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { - // For backwards compatibility. - // If AuthOptions.Scope was not set, try to determine it. - // This works well for common scenarios. - if opts.Scope == nil { - opts.Scope = new(AuthScope) - if opts.TenantID != "" { - opts.Scope.ProjectID = opts.TenantID - } else { - if opts.TenantName != "" { - opts.Scope.ProjectName = opts.TenantName - opts.Scope.DomainID = opts.DomainID - opts.Scope.DomainName = opts.DomainName - } - } - } - - if opts.Scope.System { - return map[string]interface{}{ - "system": map[string]interface{}{ - "all": true, - }, - }, nil - } - - if opts.Scope.ProjectName != "" { - // ProjectName provided: either DomainID or DomainName must also be supplied. - // ProjectID may not be supplied. - if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" { - return nil, ErrScopeDomainIDOrDomainName{} - } - if opts.Scope.ProjectID != "" { - return nil, ErrScopeProjectIDOrProjectName{} - } - - if opts.Scope.DomainID != "" { - // ProjectName + DomainID - return map[string]interface{}{ - "project": map[string]interface{}{ - "name": &opts.Scope.ProjectName, - "domain": map[string]interface{}{"id": &opts.Scope.DomainID}, - }, - }, nil - } - - if opts.Scope.DomainName != "" { - // ProjectName + DomainName - return map[string]interface{}{ - "project": map[string]interface{}{ - "name": &opts.Scope.ProjectName, - "domain": map[string]interface{}{"name": &opts.Scope.DomainName}, - }, - }, nil - } - } else if opts.Scope.ProjectID != "" { - // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. - if opts.Scope.DomainID != "" { - return nil, ErrScopeProjectIDAlone{} - } - if opts.Scope.DomainName != "" { - return nil, ErrScopeProjectIDAlone{} - } - - // ProjectID - return map[string]interface{}{ - "project": map[string]interface{}{ - "id": &opts.Scope.ProjectID, - }, - }, nil - } else if opts.Scope.DomainID != "" { - // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. 
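-		//
-		// As a sketch, the scope section this branch ultimately attaches under
-		// the "auth" key looks like:
-		//
-		//	"scope": {"domain": {"id": "<DomainID>"}}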
- if opts.Scope.DomainName != "" { - return nil, ErrScopeDomainIDOrDomainName{} - } - - // DomainID - return map[string]interface{}{ - "domain": map[string]interface{}{ - "id": &opts.Scope.DomainID, - }, - }, nil - } else if opts.Scope.DomainName != "" { - // DomainName - return map[string]interface{}{ - "domain": map[string]interface{}{ - "name": &opts.Scope.DomainName, - }, - }, nil - } - - return nil, nil -} - -func (opts AuthOptions) CanReauth() bool { - if opts.Passcode != "" { - // cannot reauth using TOTP passcode - return false - } - - return opts.AllowReauth -} - -// ToTokenV3HeadersMap allows AuthOptions to satisfy the AuthOptionsBuilder -// interface in the v3 tokens package. -func (opts *AuthOptions) ToTokenV3HeadersMap(map[string]interface{}) (map[string]string, error) { - return nil, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/auth_result.go b/vendor/github.com/gophercloud/gophercloud/auth_result.go deleted file mode 100644 index 2e4699b978c..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/auth_result.go +++ /dev/null @@ -1,52 +0,0 @@ -package gophercloud - -/* -AuthResult is the result from the request that was used to obtain a provider -client's Keystone token. It is returned from ProviderClient.GetAuthResult(). - -The following types satisfy this interface: - - github.com/gophercloud/gophercloud/openstack/identity/v2/tokens.CreateResult - github.com/gophercloud/gophercloud/openstack/identity/v3/tokens.CreateResult - -Usage example: - - import ( - "github.com/gophercloud/gophercloud" - tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" - tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" - ) - - func GetAuthenticatedUserID(providerClient *gophercloud.ProviderClient) (string, error) { - r := providerClient.GetAuthResult() - if r == nil { - //ProviderClient did not use openstack.Authenticate(), e.g. because token - //was set manually with ProviderClient.SetToken() - return "", errors.New("no AuthResult available") - } - switch r := r.(type) { - case tokens2.CreateResult: - u, err := r.ExtractUser() - if err != nil { - return "", err - } - return u.ID, nil - case tokens3.CreateResult: - u, err := r.ExtractUser() - if err != nil { - return "", err - } - return u.ID, nil - default: - panic(fmt.Sprintf("got unexpected AuthResult type %t", r)) - } - } - -Both implementing types share a lot of methods by name, like ExtractUser() in -this example. But those methods cannot be part of the AuthResult interface -because the return types are different (in this case, type tokens2.User vs. -type tokens3.User). -*/ -type AuthResult interface { - ExtractTokenID() (string, error) -} diff --git a/vendor/github.com/gophercloud/gophercloud/doc.go b/vendor/github.com/gophercloud/gophercloud/doc.go deleted file mode 100644 index 953ca822a97..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/doc.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Package gophercloud provides a multi-vendor interface to OpenStack-compatible -clouds. The library has a three-level hierarchy: providers, services, and -resources. - -Authenticating with Providers - -Provider structs represent the cloud providers that offer and manage a -collection of services. You will generally want to create one Provider -client per OpenStack cloud. - - It is now recommended to use the `clientconfig` package found at - https://github.com/gophercloud/utils/tree/master/openstack/clientconfig - for all authentication purposes. 
-
-	The below documentation is still relevant. clientconfig simply implements
-	the below and presents it in an easier and more flexible way.
-
-Use your OpenStack credentials to create a Provider client. The
-IdentityEndpoint is typically referred to as "auth_url" or "OS_AUTH_URL" in
-information provided by the cloud operator. Additionally, the cloud may refer to
-TenantID or TenantName as project_id and project_name. Credentials are
-specified like so:
-
-	opts := gophercloud.AuthOptions{
-		IdentityEndpoint: "https://openstack.example.com:5000/v2.0",
-		Username: "{username}",
-		Password: "{password}",
-		TenantID: "{tenant_id}",
-	}
-
-	provider, err := openstack.AuthenticatedClient(opts)
-
-You can authenticate with a token by doing:
-
-	opts := gophercloud.AuthOptions{
-		IdentityEndpoint: "https://openstack.example.com:5000/v2.0",
-		TokenID:  "{token_id}",
-		TenantID: "{tenant_id}",
-	}
-
-	provider, err := openstack.AuthenticatedClient(opts)
-
-You may also use the openstack.AuthOptionsFromEnv() helper function. This
-function reads in standard environment variables frequently found in an
-OpenStack `openrc` file. Again note that Gophercloud currently uses "tenant"
-instead of "project".
-
-	opts, err := openstack.AuthOptionsFromEnv()
-	provider, err := openstack.AuthenticatedClient(opts)
-
-Service Clients
-
-Service structs are specific to a provider and handle all of the logic and
-operations for a particular OpenStack service. Examples of services include:
-Compute, Object Storage, Block Storage. In order to define one, you need to
-pass in the parent provider, like so:
-
-	opts := gophercloud.EndpointOpts{Region: "RegionOne"}
-
-	client, err := openstack.NewComputeV2(provider, opts)
-
-Resources
-
-Resource structs are the domain models that services make use of in order
-to work with and represent the state of API resources:
-
-	server, err := servers.Get(client, "{serverId}").Extract()
-
-Intermediate Result structs are returned for API operations, which allow
-generic access to the HTTP headers, response body, and any errors associated
-with the network transaction. To turn a result into a usable resource struct,
-you must call the Extract method which is chained to the response, or an
-Extract function from an applicable extension:
-
-	result := servers.Get(client, "{serverId}")
-
-	// Attempt to extract the disk configuration from the OS-DCF disk config
-	// extension:
-	config, err := diskconfig.ExtractGet(result)
-
-All requests that enumerate a collection return a Pager struct that is used to
-iterate through the results one page at a time. Use the EachPage method on that
-Pager to handle each successive Page in a closure, then use the appropriate
-extraction method from that request's package to interpret that Page as a slice
-of results:
-
-	err := servers.List(client, nil).EachPage(func(page pagination.Page) (bool, error) {
-		s, err := servers.ExtractServers(page)
-		if err != nil {
-			return false, err
-		}
-
-		// Handle the []servers.Server slice.
-
-		// Return "false" or an error to prematurely stop fetching new pages.
-		return true, nil
-	})
-
-If you want to obtain the entire collection of pages without doing any
-intermediary processing on each page, you can use the AllPages method:
-
-	allPages, err := servers.List(client, nil).AllPages()
-	allServers, err := servers.ExtractServers(allPages)
-
-This top-level package contains utility functions and data types that are used
-throughout the provider and service packages.
Of particular note for end users -are the AuthOptions and EndpointOpts structs. -*/ -package gophercloud diff --git a/vendor/github.com/gophercloud/gophercloud/endpoint_search.go b/vendor/github.com/gophercloud/gophercloud/endpoint_search.go deleted file mode 100644 index 2fbc3c97f14..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/endpoint_search.go +++ /dev/null @@ -1,76 +0,0 @@ -package gophercloud - -// Availability indicates to whom a specific service endpoint is accessible: -// the internet at large, internal networks only, or only to administrators. -// Different identity services use different terminology for these. Identity v2 -// lists them as different kinds of URLs within the service catalog ("adminURL", -// "internalURL", and "publicURL"), while v3 lists them as "Interfaces" in an -// endpoint's response. -type Availability string - -const ( - // AvailabilityAdmin indicates that an endpoint is only available to - // administrators. - AvailabilityAdmin Availability = "admin" - - // AvailabilityPublic indicates that an endpoint is available to everyone on - // the internet. - AvailabilityPublic Availability = "public" - - // AvailabilityInternal indicates that an endpoint is only available within - // the cluster's internal network. - AvailabilityInternal Availability = "internal" -) - -// EndpointOpts specifies search criteria used by queries against an -// OpenStack service catalog. The options must contain enough information to -// unambiguously identify one, and only one, endpoint within the catalog. -// -// Usually, these are passed to service client factory functions in a provider -// package, like "openstack.NewComputeV2()". -type EndpointOpts struct { - // Type [required] is the service type for the client (e.g., "compute", - // "object-store"). Generally, this will be supplied by the service client - // function, but a user-given value will be honored if provided. - Type string - - // Name [optional] is the service name for the client (e.g., "nova") as it - // appears in the service catalog. Services can have the same Type but a - // different Name, which is why both Type and Name are sometimes needed. - Name string - - // Region [required] is the geographic region in which the endpoint resides, - // generally specifying which datacenter should house your resources. - // Required only for services that span multiple regions. - Region string - - // Availability [optional] is the visibility of the endpoint to be returned. - // Valid types include the constants AvailabilityPublic, AvailabilityInternal, - // or AvailabilityAdmin from this package. - // - // Availability is not required, and defaults to AvailabilityPublic. Not all - // providers or services offer all Availability options. - Availability Availability -} - -/* -EndpointLocator is an internal function to be used by provider implementations. - -It provides an implementation that locates a single endpoint from a service -catalog for a specific ProviderClient based on user-provided EndpointOpts. The -provider then uses it to discover related ServiceClients. -*/ -type EndpointLocator func(EndpointOpts) (string, error) - -// ApplyDefaults is an internal method to be used by provider implementations. -// -// It sets EndpointOpts fields if not already set, including a default type. -// Currently, EndpointOpts.Availability defaults to the public endpoint. 
-func (eo *EndpointOpts) ApplyDefaults(t string) { - if eo.Type == "" { - eo.Type = t - } - if eo.Availability == "" { - eo.Availability = AvailabilityPublic - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/errors.go b/vendor/github.com/gophercloud/gophercloud/errors.go deleted file mode 100644 index 77cabf6a924..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/errors.go +++ /dev/null @@ -1,490 +0,0 @@ -package gophercloud - -import ( - "fmt" - "net/http" - "strings" -) - -// BaseError is an error type that all other error types embed. -type BaseError struct { - DefaultErrString string - Info string -} - -func (e BaseError) Error() string { - e.DefaultErrString = "An error occurred while executing a Gophercloud request." - return e.choseErrString() -} - -func (e BaseError) choseErrString() string { - if e.Info != "" { - return e.Info - } - return e.DefaultErrString -} - -// ErrMissingInput is the error when input is required in a particular -// situation but not provided by the user -type ErrMissingInput struct { - BaseError - Argument string -} - -func (e ErrMissingInput) Error() string { - e.DefaultErrString = fmt.Sprintf("Missing input for argument [%s]", e.Argument) - return e.choseErrString() -} - -// ErrInvalidInput is an error type used for most non-HTTP Gophercloud errors. -type ErrInvalidInput struct { - ErrMissingInput - Value interface{} -} - -func (e ErrInvalidInput) Error() string { - e.DefaultErrString = fmt.Sprintf("Invalid input provided for argument [%s]: [%+v]", e.Argument, e.Value) - return e.choseErrString() -} - -// ErrMissingEnvironmentVariable is the error when environment variable is required -// in a particular situation but not provided by the user -type ErrMissingEnvironmentVariable struct { - BaseError - EnvironmentVariable string -} - -func (e ErrMissingEnvironmentVariable) Error() string { - e.DefaultErrString = fmt.Sprintf("Missing environment variable [%s]", e.EnvironmentVariable) - return e.choseErrString() -} - -// ErrMissingAnyoneOfEnvironmentVariables is the error when anyone of the environment variables -// is required in a particular situation but not provided by the user -type ErrMissingAnyoneOfEnvironmentVariables struct { - BaseError - EnvironmentVariables []string -} - -func (e ErrMissingAnyoneOfEnvironmentVariables) Error() string { - e.DefaultErrString = fmt.Sprintf( - "Missing one of the following environment variables [%s]", - strings.Join(e.EnvironmentVariables, ", "), - ) - return e.choseErrString() -} - -// ErrUnexpectedResponseCode is returned by the Request method when a response code other than -// those listed in OkCodes is encountered. -type ErrUnexpectedResponseCode struct { - BaseError - URL string - Method string - Expected []int - Actual int - Body []byte - ResponseHeader http.Header -} - -func (e ErrUnexpectedResponseCode) Error() string { - e.DefaultErrString = fmt.Sprintf( - "Expected HTTP response code %v when accessing [%s %s], but got %d instead\n%s", - e.Expected, e.Method, e.URL, e.Actual, e.Body, - ) - return e.choseErrString() -} - -// GetStatusCode returns the actual status code of the error. -func (e ErrUnexpectedResponseCode) GetStatusCode() int { - return e.Actual -} - -// StatusCodeError is a convenience interface to easily allow access to the -// status code field of the various ErrDefault* types. 
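-//
-// A brief caller-side sketch (the status constants come from net/http):
-//
-//	if scErr, ok := err.(StatusCodeError); ok {
-//		if scErr.GetStatusCode() == http.StatusNotFound {
-//			// Treat a 404 as "resource already gone".
-//		}
-//	}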
-// -// By using this interface, you only have to make a single type cast of -// the returned error to err.(StatusCodeError) and then call GetStatusCode() -// instead of having a large switch statement checking for each of the -// ErrDefault* types. -type StatusCodeError interface { - Error() string - GetStatusCode() int -} - -// ErrDefault400 is the default error type returned on a 400 HTTP response code. -type ErrDefault400 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault401 is the default error type returned on a 401 HTTP response code. -type ErrDefault401 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault403 is the default error type returned on a 403 HTTP response code. -type ErrDefault403 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault404 is the default error type returned on a 404 HTTP response code. -type ErrDefault404 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault405 is the default error type returned on a 405 HTTP response code. -type ErrDefault405 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault408 is the default error type returned on a 408 HTTP response code. -type ErrDefault408 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault409 is the default error type returned on a 409 HTTP response code. -type ErrDefault409 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault429 is the default error type returned on a 429 HTTP response code. -type ErrDefault429 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault500 is the default error type returned on a 500 HTTP response code. -type ErrDefault500 struct { - ErrUnexpectedResponseCode -} - -// ErrDefault503 is the default error type returned on a 503 HTTP response code. -type ErrDefault503 struct { - ErrUnexpectedResponseCode -} - -func (e ErrDefault400) Error() string { - e.DefaultErrString = fmt.Sprintf( - "Bad request with: [%s %s], error message: %s", - e.Method, e.URL, e.Body, - ) - return e.choseErrString() -} -func (e ErrDefault401) Error() string { - return "Authentication failed" -} -func (e ErrDefault403) Error() string { - e.DefaultErrString = fmt.Sprintf( - "Request forbidden: [%s %s], error message: %s", - e.Method, e.URL, e.Body, - ) - return e.choseErrString() -} -func (e ErrDefault404) Error() string { - return "Resource not found" -} -func (e ErrDefault405) Error() string { - return "Method not allowed" -} -func (e ErrDefault408) Error() string { - return "The server timed out waiting for the request" -} -func (e ErrDefault429) Error() string { - return "Too many requests have been sent in a given amount of time. Pause" + - " requests, wait up to one minute, and try again." -} -func (e ErrDefault500) Error() string { - return "Internal Server Error" -} -func (e ErrDefault503) Error() string { - return "The service is currently unable to handle the request due to a temporary" + - " overloading or maintenance. This is a temporary condition. Try again later." -} - -// Err400er is the interface resource error types implement to override the error message -// from a 400 error. -type Err400er interface { - Error400(ErrUnexpectedResponseCode) error -} - -// Err401er is the interface resource error types implement to override the error message -// from a 401 error. -type Err401er interface { - Error401(ErrUnexpectedResponseCode) error -} - -// Err403er is the interface resource error types implement to override the error message -// from a 403 error. 
-type Err403er interface {
-	Error403(ErrUnexpectedResponseCode) error
-}
-
-// Err404er is the interface resource error types implement to override the error message
-// from a 404 error.
-type Err404er interface {
-	Error404(ErrUnexpectedResponseCode) error
-}
-
-// Err405er is the interface resource error types implement to override the error message
-// from a 405 error.
-type Err405er interface {
-	Error405(ErrUnexpectedResponseCode) error
-}
-
-// Err408er is the interface resource error types implement to override the error message
-// from a 408 error.
-type Err408er interface {
-	Error408(ErrUnexpectedResponseCode) error
-}
-
-// Err409er is the interface resource error types implement to override the error message
-// from a 409 error.
-type Err409er interface {
-	Error409(ErrUnexpectedResponseCode) error
-}
-
-// Err429er is the interface resource error types implement to override the error message
-// from a 429 error.
-type Err429er interface {
-	Error429(ErrUnexpectedResponseCode) error
-}
-
-// Err500er is the interface resource error types implement to override the error message
-// from a 500 error.
-type Err500er interface {
-	Error500(ErrUnexpectedResponseCode) error
-}
-
-// Err503er is the interface resource error types implement to override the error message
-// from a 503 error.
-type Err503er interface {
-	Error503(ErrUnexpectedResponseCode) error
-}
-
-// ErrTimeOut is the error type returned when an operation times out.
-type ErrTimeOut struct {
-	BaseError
-}
-
-func (e ErrTimeOut) Error() string {
-	e.DefaultErrString = "A time out occurred"
-	return e.choseErrString()
-}
-
-// ErrUnableToReauthenticate is the error type returned when reauthentication fails.
-type ErrUnableToReauthenticate struct {
-	BaseError
-	ErrOriginal error
-}
-
-func (e ErrUnableToReauthenticate) Error() string {
-	e.DefaultErrString = fmt.Sprintf("Unable to re-authenticate: %s", e.ErrOriginal)
-	return e.choseErrString()
-}
-
-// ErrErrorAfterReauthentication is the error type returned when reauthentication
-// succeeds, but an error occurs afterward (usually an HTTP error).
-type ErrErrorAfterReauthentication struct {
-	BaseError
-	ErrOriginal error
-}
-
-func (e ErrErrorAfterReauthentication) Error() string {
-	e.DefaultErrString = fmt.Sprintf("Successfully re-authenticated, but got error executing request: %s", e.ErrOriginal)
-	return e.choseErrString()
-}
-
-// ErrServiceNotFound is returned when no service in a service catalog matches
-// the provided EndpointOpts. This is generally returned by provider service
-// factory methods like "NewComputeV2()" and can mean that a service is not
-// enabled for your account.
-type ErrServiceNotFound struct {
-	BaseError
-}
-
-func (e ErrServiceNotFound) Error() string {
-	e.DefaultErrString = "No suitable service could be found in the service catalog."
-	return e.choseErrString()
-}
-
-// ErrEndpointNotFound is returned when no available endpoints match the
-// provided EndpointOpts. This is also generally returned by provider service
-// factory methods, and usually indicates that a region was specified
-// incorrectly.
-type ErrEndpointNotFound struct {
-	BaseError
-}
-
-func (e ErrEndpointNotFound) Error() string {
-	e.DefaultErrString = "No suitable endpoint could be found in the service catalog."
-	return e.choseErrString()
-}
-
-// ErrResourceNotFound is the error when trying to retrieve a resource's
-// ID by name and the resource doesn't exist.
-type ErrResourceNotFound struct {
-	BaseError
-	Name         string
-	ResourceType string
-}
-
-func (e ErrResourceNotFound) Error() string {
-	e.DefaultErrString = fmt.Sprintf("Unable to find %s with name %s", e.ResourceType, e.Name)
-	return e.choseErrString()
-}
-
-// ErrMultipleResourcesFound is the error when trying to retrieve a resource's
-// ID by name and multiple resources have the user-provided name.
-type ErrMultipleResourcesFound struct {
-	BaseError
-	Name         string
-	Count        int
-	ResourceType string
-}
-
-func (e ErrMultipleResourcesFound) Error() string {
-	e.DefaultErrString = fmt.Sprintf("Found %d %ss matching %s", e.Count, e.ResourceType, e.Name)
-	return e.choseErrString()
-}
-
-// ErrUnexpectedType is the error when an unexpected type is encountered
-type ErrUnexpectedType struct {
-	BaseError
-	Expected string
-	Actual   string
-}
-
-func (e ErrUnexpectedType) Error() string {
-	e.DefaultErrString = fmt.Sprintf("Expected %s but got %s", e.Expected, e.Actual)
-	return e.choseErrString()
-}
-
-func unacceptedAttributeErr(attribute string) string {
-	return fmt.Sprintf("The base Identity V3 API does not accept authentication by %s", attribute)
-}
-
-func redundantWithTokenErr(attribute string) string {
-	return fmt.Sprintf("%s may not be provided when authenticating with a TokenID", attribute)
-}
-
-func redundantWithUserID(attribute string) string {
-	return fmt.Sprintf("%s may not be provided when authenticating with a UserID", attribute)
-}
-
-// ErrAPIKeyProvided indicates that an APIKey was provided but can't be used.
-type ErrAPIKeyProvided struct{ BaseError }
-
-func (e ErrAPIKeyProvided) Error() string {
-	return unacceptedAttributeErr("APIKey")
-}
-
-// ErrTenantIDProvided indicates that a TenantID was provided but can't be used.
-type ErrTenantIDProvided struct{ BaseError }
-
-func (e ErrTenantIDProvided) Error() string {
-	return unacceptedAttributeErr("TenantID")
-}
-
-// ErrTenantNameProvided indicates that a TenantName was provided but can't be used.
-type ErrTenantNameProvided struct{ BaseError }
-
-func (e ErrTenantNameProvided) Error() string {
-	return unacceptedAttributeErr("TenantName")
-}
-
-// ErrUsernameWithToken indicates that a Username was provided, but token authentication is being used instead.
-type ErrUsernameWithToken struct{ BaseError }
-
-func (e ErrUsernameWithToken) Error() string {
-	return redundantWithTokenErr("Username")
-}
-
-// ErrUserIDWithToken indicates that a UserID was provided, but token authentication is being used instead.
-type ErrUserIDWithToken struct{ BaseError }
-
-func (e ErrUserIDWithToken) Error() string {
-	return redundantWithTokenErr("UserID")
-}
-
-// ErrDomainIDWithToken indicates that a DomainID was provided, but token authentication is being used instead.
-type ErrDomainIDWithToken struct{ BaseError }
-
-func (e ErrDomainIDWithToken) Error() string {
-	return redundantWithTokenErr("DomainID")
-}
-
-// ErrDomainNameWithToken indicates that a DomainName was provided, but token authentication is being used instead.
-type ErrDomainNameWithToken struct{ BaseError }
-
-func (e ErrDomainNameWithToken) Error() string {
-	return redundantWithTokenErr("DomainName")
-}
-
-// ErrUsernameOrUserID indicates that neither username nor userID is specified, or that both are specified at once.
-type ErrUsernameOrUserID struct{ BaseError } - -func (e ErrUsernameOrUserID) Error() string { - return "Exactly one of Username and UserID must be provided for password authentication" -} - -// ErrDomainIDWithUserID indicates that a DomainID was provided, but unnecessary because a UserID is being used. -type ErrDomainIDWithUserID struct{ BaseError } - -func (e ErrDomainIDWithUserID) Error() string { - return redundantWithUserID("DomainID") -} - -// ErrDomainNameWithUserID indicates that a DomainName was provided, but unnecessary because a UserID is being used. -type ErrDomainNameWithUserID struct{ BaseError } - -func (e ErrDomainNameWithUserID) Error() string { - return redundantWithUserID("DomainName") -} - -// ErrDomainIDOrDomainName indicates that a username was provided, but no domain to scope it. -// It may also indicate that both a DomainID and a DomainName were provided at once. -type ErrDomainIDOrDomainName struct{ BaseError } - -func (e ErrDomainIDOrDomainName) Error() string { - return "You must provide exactly one of DomainID or DomainName to authenticate by Username" -} - -// ErrMissingPassword indicates that no password was provided and no token is available. -type ErrMissingPassword struct{ BaseError } - -func (e ErrMissingPassword) Error() string { - return "You must provide a password to authenticate" -} - -// ErrScopeDomainIDOrDomainName indicates that a domain ID or Name was required in a Scope, but not present. -type ErrScopeDomainIDOrDomainName struct{ BaseError } - -func (e ErrScopeDomainIDOrDomainName) Error() string { - return "You must provide exactly one of DomainID or DomainName in a Scope with ProjectName" -} - -// ErrScopeProjectIDOrProjectName indicates that both a ProjectID and a ProjectName were provided in a Scope. -type ErrScopeProjectIDOrProjectName struct{ BaseError } - -func (e ErrScopeProjectIDOrProjectName) Error() string { - return "You must provide at most one of ProjectID or ProjectName in a Scope" -} - -// ErrScopeProjectIDAlone indicates that a ProjectID was provided with other constraints in a Scope. -type ErrScopeProjectIDAlone struct{ BaseError } - -func (e ErrScopeProjectIDAlone) Error() string { - return "ProjectID must be supplied alone in a Scope" -} - -// ErrScopeEmpty indicates that no credentials were provided in a Scope. 
-type ErrScopeEmpty struct{ BaseError } - -func (e ErrScopeEmpty) Error() string { - return "You must provide either a Project or Domain in a Scope" -} - -// ErrAppCredMissingSecret indicates that no Application Credential Secret was provided with Application Credential ID or Name -type ErrAppCredMissingSecret struct{ BaseError } - -func (e ErrAppCredMissingSecret) Error() string { - return "You must provide an Application Credential Secret" -} diff --git a/vendor/github.com/gophercloud/gophercloud/go.mod b/vendor/github.com/gophercloud/gophercloud/go.mod deleted file mode 100644 index 64e2a0fb484..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module github.com/gophercloud/gophercloud - -go 1.13 - -require ( - github.com/kr/pretty v0.2.1 // indirect - golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e - golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 // indirect - gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 // indirect - gopkg.in/yaml.v2 v2.3.0 -) diff --git a/vendor/github.com/gophercloud/gophercloud/go.sum b/vendor/github.com/gophercloud/gophercloud/go.sum deleted file mode 100644 index 311ab0449d7..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/go.sum +++ /dev/null @@ -1,19 +0,0 @@ -github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e h1:egKlR8l7Nu9vHGWbcUV8lqR4987UfUbBd7GbhqGzNYU= -golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9 h1:ZBzSG/7F4eNKz2L3GE9o300RX0Az1Bw5HF7PDraD+qU= -golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go deleted file mode 100644 index c801de55532..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go +++ /dev/null @@ -1,128 +0,0 @@ -package openstack - -import ( - "os" - - "github.com/gophercloud/gophercloud" -) - -var nilOptions = gophercloud.AuthOptions{} - -/* -AuthOptionsFromEnv 
fills out a gophercloud.AuthOptions structure with the
-settings found on the various OpenStack OS_* environment variables.
-
-The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME,
-OS_PASSWORD, and OS_PROJECT_ID.
-
-Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must be set, or an error
-will result. OS_PROJECT_ID is optional.
-
-OS_TENANT_ID and OS_TENANT_NAME are deprecated forms of OS_PROJECT_ID and
-OS_PROJECT_NAME; the latter pair is what the v3 Identity API expects.
-
-Even when OS_PROJECT_ID and OS_PROJECT_NAME are set, they are still referred
-to as "tenant" within Gophercloud.
-
-If OS_PROJECT_NAME is set, it requires OS_PROJECT_ID to be set as well to
-handle projects not on the default domain.
-
-To use this function, first set the OS_* environment variables (for example,
-by sourcing an `openrc` file), then:
-
-	opts, err := openstack.AuthOptionsFromEnv()
-	provider, err := openstack.AuthenticatedClient(opts)
-*/
-func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) {
-	authURL := os.Getenv("OS_AUTH_URL")
-	username := os.Getenv("OS_USERNAME")
-	userID := os.Getenv("OS_USERID")
-	password := os.Getenv("OS_PASSWORD")
-	passcode := os.Getenv("OS_PASSCODE")
-	tenantID := os.Getenv("OS_TENANT_ID")
-	tenantName := os.Getenv("OS_TENANT_NAME")
-	domainID := os.Getenv("OS_DOMAIN_ID")
-	domainName := os.Getenv("OS_DOMAIN_NAME")
-	applicationCredentialID := os.Getenv("OS_APPLICATION_CREDENTIAL_ID")
-	applicationCredentialName := os.Getenv("OS_APPLICATION_CREDENTIAL_NAME")
-	applicationCredentialSecret := os.Getenv("OS_APPLICATION_CREDENTIAL_SECRET")
-
-	// If OS_PROJECT_ID is set, overwrite tenantID with the value.
-	if v := os.Getenv("OS_PROJECT_ID"); v != "" {
-		tenantID = v
-	}
-
-	// If OS_PROJECT_NAME is set, overwrite tenantName with the value.
- if v := os.Getenv("OS_PROJECT_NAME"); v != "" { - tenantName = v - } - - if authURL == "" { - err := gophercloud.ErrMissingEnvironmentVariable{ - EnvironmentVariable: "OS_AUTH_URL", - } - return nilOptions, err - } - - if userID == "" && username == "" { - // Empty username and userID could be ignored, when applicationCredentialID and applicationCredentialSecret are set - if applicationCredentialID == "" && applicationCredentialSecret == "" { - err := gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ - EnvironmentVariables: []string{"OS_USERID", "OS_USERNAME"}, - } - return nilOptions, err - } - } - - if password == "" && passcode == "" && applicationCredentialID == "" && applicationCredentialName == "" { - err := gophercloud.ErrMissingEnvironmentVariable{ - // silently ignore TOTP passcode warning, since it is not a common auth method - EnvironmentVariable: "OS_PASSWORD", - } - return nilOptions, err - } - - if (applicationCredentialID != "" || applicationCredentialName != "") && applicationCredentialSecret == "" { - err := gophercloud.ErrMissingEnvironmentVariable{ - EnvironmentVariable: "OS_APPLICATION_CREDENTIAL_SECRET", - } - return nilOptions, err - } - - if domainID == "" && domainName == "" && tenantID == "" && tenantName != "" { - err := gophercloud.ErrMissingEnvironmentVariable{ - EnvironmentVariable: "OS_PROJECT_ID", - } - return nilOptions, err - } - - if applicationCredentialID == "" && applicationCredentialName != "" && applicationCredentialSecret != "" { - if userID == "" && username == "" { - return nilOptions, gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ - EnvironmentVariables: []string{"OS_USERID", "OS_USERNAME"}, - } - } - if username != "" && domainID == "" && domainName == "" { - return nilOptions, gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ - EnvironmentVariables: []string{"OS_DOMAIN_ID", "OS_DOMAIN_NAME"}, - } - } - } - - ao := gophercloud.AuthOptions{ - IdentityEndpoint: authURL, - UserID: userID, - Username: username, - Password: password, - Passcode: passcode, - TenantID: tenantID, - TenantName: tenantName, - DomainID: domainID, - DomainName: domainName, - ApplicationCredentialID: applicationCredentialID, - ApplicationCredentialName: applicationCredentialName, - ApplicationCredentialSecret: applicationCredentialSecret, - } - - return ao, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/client.go deleted file mode 100644 index 655a9f6b915..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/client.go +++ /dev/null @@ -1,503 +0,0 @@ -package openstack - -import ( - "fmt" - "reflect" - "strings" - - "github.com/gophercloud/gophercloud" - tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" - "github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens" - "github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1" - tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" - "github.com/gophercloud/gophercloud/openstack/utils" -) - -const ( - // v2 represents Keystone v2. - // It should never increase beyond 2.0. - v2 = "v2.0" - - // v3 represents Keystone v3. - // The version can be anything from v3 to v3.x. - v3 = "v3" -) - -/* -NewClient prepares an unauthenticated ProviderClient instance. -Most users will probably prefer using the AuthenticatedClient function -instead. 
-
-This is useful if you wish to explicitly control the version of the identity
-service that's used for authentication, for example.
-
-A basic example of using this would be:
-
-	ao, err := openstack.AuthOptionsFromEnv()
-	provider, err := openstack.NewClient(ao.IdentityEndpoint)
-	client, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{})
-*/
-func NewClient(endpoint string) (*gophercloud.ProviderClient, error) {
-	base, err := utils.BaseEndpoint(endpoint)
-	if err != nil {
-		return nil, err
-	}
-
-	endpoint = gophercloud.NormalizeURL(endpoint)
-	base = gophercloud.NormalizeURL(base)
-
-	p := new(gophercloud.ProviderClient)
-	p.IdentityBase = base
-	p.IdentityEndpoint = endpoint
-	p.UseTokenLock()
-
-	return p, nil
-}
-
-/*
-AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint
-specified by the options, acquires a token, and returns a ProviderClient
-instance that's ready to operate.
-
-If the full path to a versioned identity endpoint was specified (example:
-http://example.com:5000/v3), that path will be used as the endpoint to query.
-
-If a versionless endpoint was specified (example: http://example.com:5000/),
-the endpoint will be queried to determine which versions of the identity service
-are available, and the most recent or most supported version will be chosen.
-
-Example:
-
-	ao, err := openstack.AuthOptionsFromEnv()
-	provider, err := openstack.AuthenticatedClient(ao)
-	client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{
-		Region: os.Getenv("OS_REGION_NAME"),
-	})
-*/
-func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) {
-	client, err := NewClient(options.IdentityEndpoint)
-	if err != nil {
-		return nil, err
-	}
-
-	err = Authenticate(client, options)
-	if err != nil {
-		return nil, err
-	}
-	return client, nil
-}
-
-// Authenticate or re-authenticate against the most recent identity service
-// supported at the provided endpoint.
-func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error {
-	versions := []*utils.Version{
-		{ID: v2, Priority: 20, Suffix: "/v2.0/"},
-		{ID: v3, Priority: 30, Suffix: "/v3/"},
-	}
-
-	chosen, endpoint, err := utils.ChooseVersion(client, versions)
-	if err != nil {
-		return err
-	}
-
-	switch chosen.ID {
-	case v2:
-		return v2auth(client, endpoint, options, gophercloud.EndpointOpts{})
-	case v3:
-		return v3auth(client, endpoint, &options, gophercloud.EndpointOpts{})
-	default:
-		// The switch statement must be out of date from the versions list.
-		return fmt.Errorf("Unrecognized identity version: %s", chosen.ID)
-	}
-}
-
-// AuthenticateV2 explicitly authenticates against the identity v2 endpoint.
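-//
-// A minimal sketch of pinning authentication to v2 (the endpoint is a
-// placeholder, and opts is a gophercloud.AuthOptions value as above):
-//
-//	provider, err := openstack.NewClient("https://openstack.example.com:5000/v2.0")
-//	err = openstack.AuthenticateV2(provider, opts, gophercloud.EndpointOpts{})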
-func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { - return v2auth(client, "", options, eo) -} - -func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { - v2Client, err := NewIdentityV2(client, eo) - if err != nil { - return err - } - - if endpoint != "" { - v2Client.Endpoint = endpoint - } - - v2Opts := tokens2.AuthOptions{ - IdentityEndpoint: options.IdentityEndpoint, - Username: options.Username, - Password: options.Password, - TenantID: options.TenantID, - TenantName: options.TenantName, - AllowReauth: options.AllowReauth, - TokenID: options.TokenID, - } - - result := tokens2.Create(v2Client, v2Opts) - - err = client.SetTokenAndAuthResult(result) - if err != nil { - return err - } - - catalog, err := result.ExtractServiceCatalog() - if err != nil { - return err - } - - if options.AllowReauth { - // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but - // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, - // this should retry authentication only once - tac := *client - tac.SetThrowaway(true) - tac.ReauthFunc = nil - tac.SetTokenAndAuthResult(nil) - tao := options - tao.AllowReauth = false - client.ReauthFunc = func() error { - err := v2auth(&tac, endpoint, tao, eo) - if err != nil { - return err - } - client.CopyTokenFrom(&tac) - return nil - } - } - client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { - return V2EndpointURL(catalog, opts) - } - - return nil -} - -// AuthenticateV3 explicitly authenticates against the identity v3 service. -func AuthenticateV3(client *gophercloud.ProviderClient, options tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { - return v3auth(client, "", options, eo) -} - -func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { - // Override the generated service endpoint with the one returned by the version endpoint. 
- v3Client, err := NewIdentityV3(client, eo) - if err != nil { - return err - } - - if endpoint != "" { - v3Client.Endpoint = endpoint - } - - var catalog *tokens3.ServiceCatalog - - var tokenID string - // passthroughToken allows to passthrough the token without a scope - var passthroughToken bool - switch v := opts.(type) { - case *gophercloud.AuthOptions: - tokenID = v.TokenID - passthroughToken = (v.Scope == nil || *v.Scope == gophercloud.AuthScope{}) - case *tokens3.AuthOptions: - tokenID = v.TokenID - passthroughToken = (v.Scope == tokens3.Scope{}) - } - - if tokenID != "" && passthroughToken { - // passing through the token ID without requesting a new scope - if opts.CanReauth() { - return fmt.Errorf("cannot use AllowReauth, when the token ID is defined and auth scope is not set") - } - - v3Client.SetToken(tokenID) - result := tokens3.Get(v3Client, tokenID) - if result.Err != nil { - return result.Err - } - - err = client.SetTokenAndAuthResult(result) - if err != nil { - return err - } - - catalog, err = result.ExtractServiceCatalog() - if err != nil { - return err - } - } else { - var result tokens3.CreateResult - switch opts.(type) { - case *ec2tokens.AuthOptions: - result = ec2tokens.Create(v3Client, opts) - case *oauth1.AuthOptions: - result = oauth1.Create(v3Client, opts) - default: - result = tokens3.Create(v3Client, opts) - } - - err = client.SetTokenAndAuthResult(result) - if err != nil { - return err - } - - catalog, err = result.ExtractServiceCatalog() - if err != nil { - return err - } - } - - if opts.CanReauth() { - // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but - // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, - // this should retry authentication only once - tac := *client - tac.SetThrowaway(true) - tac.ReauthFunc = nil - tac.SetTokenAndAuthResult(nil) - var tao tokens3.AuthOptionsBuilder - switch ot := opts.(type) { - case *gophercloud.AuthOptions: - o := *ot - o.AllowReauth = false - tao = &o - case *tokens3.AuthOptions: - o := *ot - o.AllowReauth = false - tao = &o - case *ec2tokens.AuthOptions: - o := *ot - o.AllowReauth = false - tao = &o - case *oauth1.AuthOptions: - o := *ot - o.AllowReauth = false - tao = &o - default: - tao = opts - } - client.ReauthFunc = func() error { - err := v3auth(&tac, endpoint, tao, eo) - if err != nil { - return err - } - client.CopyTokenFrom(&tac) - return nil - } - } - client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { - return V3EndpointURL(catalog, opts) - } - - return nil -} - -// NewIdentityV2 creates a ServiceClient that may be used to interact with the -// v2 identity service. -func NewIdentityV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - endpoint := client.IdentityBase + "v2.0/" - clientType := "identity" - var err error - if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { - eo.ApplyDefaults(clientType) - endpoint, err = client.EndpointLocator(eo) - if err != nil { - return nil, err - } - } - - return &gophercloud.ServiceClient{ - ProviderClient: client, - Endpoint: endpoint, - Type: clientType, - }, nil -} - -// NewIdentityV3 creates a ServiceClient that may be used to access the v3 -// identity service. 
-func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - endpoint := client.IdentityBase + "v3/" - clientType := "identity" - var err error - if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { - eo.ApplyDefaults(clientType) - endpoint, err = client.EndpointLocator(eo) - if err != nil { - return nil, err - } - } - - // Ensure endpoint still has a suffix of v3. - // This is because EndpointLocator might have found a versionless - // endpoint or the published endpoint is still /v2.0. In both - // cases, we need to fix the endpoint to point to /v3. - base, err := utils.BaseEndpoint(endpoint) - if err != nil { - return nil, err - } - - base = gophercloud.NormalizeURL(base) - - endpoint = base + "v3/" - - return &gophercloud.ServiceClient{ - ProviderClient: client, - Endpoint: endpoint, - Type: clientType, - }, nil -} - -func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) { - sc := new(gophercloud.ServiceClient) - eo.ApplyDefaults(clientType) - url, err := client.EndpointLocator(eo) - if err != nil { - return sc, err - } - sc.ProviderClient = client - sc.Endpoint = url - sc.Type = clientType - return sc, nil -} - -// NewBareMetalV1 creates a ServiceClient that may be used with the v1 -// bare metal package. -func NewBareMetalV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "baremetal") -} - -// NewBareMetalIntrospectionV1 creates a ServiceClient that may be used with the v1 -// bare metal introspection package. -func NewBareMetalIntrospectionV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "baremetal-inspector") -} - -// NewObjectStorageV1 creates a ServiceClient that may be used with the v1 -// object storage package. -func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "object-store") -} - -// NewComputeV2 creates a ServiceClient that may be used with the v2 compute -// package. -func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "compute") -} - -// NewNetworkV2 creates a ServiceClient that may be used with the v2 network -// package. -func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "network") - sc.ResourceBase = sc.Endpoint + "v2.0/" - return sc, err -} - -// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 -// block storage service. -func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "volume") -} - -// NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 -// block storage service. -func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "volumev2") -} - -// NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service. 
-func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "volumev3") -} - -// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service. -func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "sharev2") -} - -// NewCDNV1 creates a ServiceClient that may be used to access the OpenStack v1 -// CDN service. -func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "cdn") -} - -// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 -// orchestration service. -func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "orchestration") -} - -// NewDBV1 creates a ServiceClient that may be used to access the v1 DB service. -func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "database") -} - -// NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS -// service. -func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "dns") - sc.ResourceBase = sc.Endpoint + "v2/" - return sc, err -} - -// NewImageServiceV2 creates a ServiceClient that may be used to access the v2 -// image service. -func NewImageServiceV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "image") - sc.ResourceBase = sc.Endpoint + "v2/" - return sc, err -} - -// NewLoadBalancerV2 creates a ServiceClient that may be used to access the v2 -// load balancer service. -func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "load-balancer") - - // Fixes edge case having an OpenStack lb endpoint with trailing version number. - endpoint := strings.Replace(sc.Endpoint, "v2.0/", "", -1) - - sc.ResourceBase = endpoint + "v2.0/" - return sc, err -} - -// NewClusteringV1 creates a ServiceClient that may be used with the v1 clustering -// package. -func NewClusteringV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "clustering") -} - -// NewMessagingV2 creates a ServiceClient that may be used with the v2 messaging -// service. -func NewMessagingV2(client *gophercloud.ProviderClient, clientID string, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "messaging") - sc.MoreHeaders = map[string]string{"Client-ID": clientID} - return sc, err -} - -// NewContainerV1 creates a ServiceClient that may be used with v1 container package -func NewContainerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "container") -} - -// NewKeyManagerV1 creates a ServiceClient that may be used with the v1 key -// manager service. 
-func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - sc, err := initClientOpts(client, eo, "key-manager") - sc.ResourceBase = sc.Endpoint + "v1/" - return sc, err -} - -// NewContainerInfraV1 creates a ServiceClient that may be used with the v1 container infra management -// package. -func NewContainerInfraV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "container-infra") -} - -// NewWorkflowV2 creates a ServiceClient that may be used with the v2 workflow management package. -func NewWorkflowV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "workflowv2") -} - -// NewPlacementV1 creates a ServiceClient that may be used with the placement package. -func NewPlacementV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { - return initClientOpts(client, eo, "placement") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/doc.go deleted file mode 100644 index af4bd512bf5..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/doc.go +++ /dev/null @@ -1,14 +0,0 @@ -/* -Package openstack contains resources for the individual OpenStack projects -supported in Gophercloud. It also includes functions to authenticate to an -OpenStack cloud and for provisioning various service-level clients. - -Example of Creating a Service Client - - ao, err := openstack.AuthOptionsFromEnv() - provider, err := openstack.AuthenticatedClient(ao) - client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{ - Region: os.Getenv("OS_REGION_NAME"), - }) -*/ -package openstack diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go deleted file mode 100644 index 509700790ef..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go +++ /dev/null @@ -1,111 +0,0 @@ -package openstack - -import ( - "github.com/gophercloud/gophercloud" - tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" - tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" -) - -/* -V2EndpointURL discovers the endpoint URL for a specific service from a -ServiceCatalog acquired during the v2 identity service. - -The specified EndpointOpts are used to identify a unique, unambiguous endpoint -to return. It's an error both when multiple endpoints match the provided -criteria and when none do. The minimum that can be specified is a Type, but you -will also often need to specify a Name and/or a Region depending on what's -available on your OpenStack deployment. -*/ -func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { - // Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided. 
- var endpoints = make([]tokens2.Endpoint, 0, 1) - for _, entry := range catalog.Entries { - if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { - for _, endpoint := range entry.Endpoints { - if opts.Region == "" || endpoint.Region == opts.Region { - endpoints = append(endpoints, endpoint) - } - } - } - } - - // If multiple endpoints were found, use the first result - // and disregard the other endpoints. - // - // This behavior matches the Python library. See GH-1764. - if len(endpoints) > 1 { - endpoints = endpoints[0:1] - } - - // Extract the appropriate URL from the matching Endpoint. - for _, endpoint := range endpoints { - switch opts.Availability { - case gophercloud.AvailabilityPublic: - return gophercloud.NormalizeURL(endpoint.PublicURL), nil - case gophercloud.AvailabilityInternal: - return gophercloud.NormalizeURL(endpoint.InternalURL), nil - case gophercloud.AvailabilityAdmin: - return gophercloud.NormalizeURL(endpoint.AdminURL), nil - default: - err := &ErrInvalidAvailabilityProvided{} - err.Argument = "Availability" - err.Value = opts.Availability - return "", err - } - } - - // Report an error if there were no matching endpoints. - err := &gophercloud.ErrEndpointNotFound{} - return "", err -} - -/* -V3EndpointURL discovers the endpoint URL for a specific service from a Catalog -acquired during the v3 identity service. - -The specified EndpointOpts are used to identify a unique, unambiguous endpoint -to return. It's an error both when multiple endpoints match the provided -criteria and when none do. The minimum that can be specified is a Type, but you -will also often need to specify a Name and/or a Region depending on what's -available on your OpenStack deployment. -*/ -func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { - // Extract Endpoints from the catalog entries that match the requested Type, Interface, - // Name if provided, and Region if provided. - var endpoints = make([]tokens3.Endpoint, 0, 1) - for _, entry := range catalog.Entries { - if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { - for _, endpoint := range entry.Endpoints { - if opts.Availability != gophercloud.AvailabilityAdmin && - opts.Availability != gophercloud.AvailabilityPublic && - opts.Availability != gophercloud.AvailabilityInternal { - err := &ErrInvalidAvailabilityProvided{} - err.Argument = "Availability" - err.Value = opts.Availability - return "", err - } - if (opts.Availability == gophercloud.Availability(endpoint.Interface)) && - (opts.Region == "" || endpoint.Region == opts.Region || endpoint.RegionID == opts.Region) { - endpoints = append(endpoints, endpoint) - } - } - } - } - - // If multiple endpoints were found, use the first result - // and disregard the other endpoints. - // - // This behavior matches the Python library. See GH-1764. - if len(endpoints) > 1 { - endpoints = endpoints[0:1] - } - - // Extract the URL from the matching Endpoint. - for _, endpoint := range endpoints { - return gophercloud.NormalizeURL(endpoint.URL), nil - } - - // Report an error if there were no matching endpoints. 
- err := &gophercloud.ErrEndpointNotFound{} - return "", err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/errors.go deleted file mode 100644 index cba6ae5f00c..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/errors.go +++ /dev/null @@ -1,47 +0,0 @@ -package openstack - -import ( - "fmt" - - "github.com/gophercloud/gophercloud" -) - -// ErrEndpointNotFound is the error when no suitable endpoint can be found -// in the user's catalog -type ErrEndpointNotFound struct{ gophercloud.BaseError } - -func (e ErrEndpointNotFound) Error() string { - return "No suitable endpoint could be found in the service catalog." -} - -// ErrInvalidAvailabilityProvided is the error when an invalid endpoint -// availability is provided -type ErrInvalidAvailabilityProvided struct{ gophercloud.ErrInvalidInput } - -func (e ErrInvalidAvailabilityProvided) Error() string { - return fmt.Sprintf("Unexpected availability in endpoint query: %s", e.Value) -} - -// ErrNoAuthURL is the error when the OS_AUTH_URL environment variable is not -// found -type ErrNoAuthURL struct{ gophercloud.ErrInvalidInput } - -func (e ErrNoAuthURL) Error() string { - return "Environment variable OS_AUTH_URL needs to be set." -} - -// ErrNoUsername is the error when the OS_USERNAME environment variable is not -// found -type ErrNoUsername struct{ gophercloud.ErrInvalidInput } - -func (e ErrNoUsername) Error() string { - return "Environment variable OS_USERNAME needs to be set." -} - -// ErrNoPassword is the error when the OS_PASSWORD environment variable is not -// found -type ErrNoPassword struct{ gophercloud.ErrInvalidInput } - -func (e ErrNoPassword) Error() string { - return "Environment variable OS_PASSWORD needs to be set." -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go deleted file mode 100644 index 348dd208396..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go +++ /dev/null @@ -1,65 +0,0 @@ -/* -Package tenants provides information and interaction with the -tenants API resource for the OpenStack Identity service. - -See http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 -and http://developer.openstack.org/api-ref-identity-v2.html#admin-tenants -for more information. 
-
-Example to List Tenants
-
-	listOpts := &tenants.ListOpts{
-		Limit: 2,
-	}
-
-	allPages, err := tenants.List(identityClient, listOpts).AllPages()
-	if err != nil {
-		panic(err)
-	}
-
-	allTenants, err := tenants.ExtractTenants(allPages)
-	if err != nil {
-		panic(err)
-	}
-
-	for _, tenant := range allTenants {
-		fmt.Printf("%+v\n", tenant)
-	}
-
-Example to Create a Tenant
-
-	createOpts := tenants.CreateOpts{
-		Name:        "tenant_name",
-		Description: "this is a tenant",
-		Enabled:     gophercloud.Enabled,
-	}
-
-	tenant, err := tenants.Create(identityClient, createOpts).Extract()
-	if err != nil {
-		panic(err)
-	}
-
-Example to Update a Tenant
-
-	tenantID := "e6db6ed6277c461a853458589063b295"
-
-	updateOpts := tenants.UpdateOpts{
-		Description: "this is a new description",
-		Enabled:     gophercloud.Disabled,
-	}
-
-	tenant, err := tenants.Update(identityClient, tenantID, updateOpts).Extract()
-	if err != nil {
-		panic(err)
-	}
-
-Example to Delete a Tenant
-
-	tenantID := "e6db6ed6277c461a853458589063b295"
-
-	err := tenants.Delete(identityClient, tenantID).ExtractErr()
-	if err != nil {
-		panic(err)
-	}
-*/
-package tenants
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go
deleted file mode 100644
index f16df38e5ec..00000000000
--- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package tenants
-
-import (
-	"github.com/gophercloud/gophercloud"
-	"github.com/gophercloud/gophercloud/pagination"
-)
-
-// ListOpts filters the Tenants that are returned by the List call.
-type ListOpts struct {
-	// Marker is the ID of the last Tenant on the previous page.
-	Marker string `q:"marker"`
-
-	// Limit specifies the page size.
-	Limit int `q:"limit"`
-}
-
-// List enumerates the Tenants to which the current token has access.
-func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager {
-	url := listURL(client)
-	if opts != nil {
-		q, err := gophercloud.BuildQueryString(opts)
-		if err != nil {
-			return pagination.Pager{Err: err}
-		}
-		url += q.String()
-	}
-	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
-		return TenantPage{pagination.LinkedPageBase{PageResult: r}}
-	})
-}
-
-// CreateOpts represents the options needed when creating a new tenant.
-type CreateOpts struct {
-	// Name is the name of the tenant.
-	Name string `json:"name" required:"true"`
-
-	// Description is the description of the tenant.
-	Description string `json:"description,omitempty"`
-
-	// Enabled sets the tenant status to enabled or disabled.
-	Enabled *bool `json:"enabled,omitempty"`
-}
-
-// CreateOptsBuilder enables extensions to add additional parameters to the
-// Create request.
-type CreateOptsBuilder interface {
-	ToTenantCreateMap() (map[string]interface{}, error)
-}
-
-// ToTenantCreateMap assembles a request body based on the contents of
-// a CreateOpts.
-func (opts CreateOpts) ToTenantCreateMap() (map[string]interface{}, error) {
-	return gophercloud.BuildRequestBody(opts, "tenant")
-}
-
-// Create is the operation responsible for creating a new tenant.
-func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
-	b, err := opts.ToTenantCreateMap()
-	if err != nil {
-		r.Err = err
-		return
-	}
-	resp, err := client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
-		OkCodes: []int{200, 201},
-	})
-	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
-	return
-}
-
-// Get requests details on a single tenant by ID.
-func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
-	resp, err := client.Get(getURL(client, id), &r.Body, nil)
-	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
-	return
-}
-
-// UpdateOptsBuilder allows extensions to add additional parameters to the
-// Update request.
-type UpdateOptsBuilder interface {
-	ToTenantUpdateMap() (map[string]interface{}, error)
-}
-
-// UpdateOpts specifies the base attributes that may be updated on an existing
-// tenant.
-type UpdateOpts struct {
-	// Name is the name of the tenant.
-	Name string `json:"name,omitempty"`
-
-	// Description is the description of the tenant.
-	Description *string `json:"description,omitempty"`
-
-	// Enabled sets the tenant status to enabled or disabled.
-	Enabled *bool `json:"enabled,omitempty"`
-}
-
-// ToTenantUpdateMap formats an UpdateOpts structure into a request body.
-func (opts UpdateOpts) ToTenantUpdateMap() (map[string]interface{}, error) {
-	return gophercloud.BuildRequestBody(opts, "tenant")
-}
-
-// Update is the operation responsible for updating existing tenants by their TenantID.
-func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
-	b, err := opts.ToTenantUpdateMap()
-	if err != nil {
-		r.Err = err
-		return
-	}
-	resp, err := client.Put(updateURL(client, id), &b, &r.Body, &gophercloud.RequestOpts{
-		OkCodes: []int{200},
-	})
-	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
-	return
-}
-
-// Delete is the operation responsible for permanently deleting a tenant.
-func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
-	resp, err := client.Delete(deleteURL(client, id), nil)
-	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
-	return
-}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go
deleted file mode 100644
index bb6c2c6b08a..00000000000
--- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package tenants
-
-import (
-	"github.com/gophercloud/gophercloud"
-	"github.com/gophercloud/gophercloud/pagination"
-)
-
-// Tenant is a grouping of users in the identity service.
-type Tenant struct {
-	// ID is a unique identifier for this tenant.
-	ID string `json:"id"`
-
-	// Name is a friendlier user-facing name for this tenant.
-	Name string `json:"name"`
-
-	// Description is a human-readable explanation of this Tenant's purpose.
-	Description string `json:"description"`
-
-	// Enabled indicates whether or not a tenant is active.
-	Enabled bool `json:"enabled"`
-}
-
-// TenantPage is a single page of Tenant results.
-type TenantPage struct {
-	pagination.LinkedPageBase
-}
-
-// IsEmpty determines whether or not a page of Tenants contains any results.
-func (r TenantPage) IsEmpty() (bool, error) {
-	tenants, err := ExtractTenants(r)
-	return len(tenants) == 0, err
-}
-
-// NextPageURL extracts the "next" link from the tenants_links section of the result.
-func (r TenantPage) NextPageURL() (string, error) {
-	var s struct {
-		Links []gophercloud.Link `json:"tenants_links"`
-	}
-	err := r.ExtractInto(&s)
-	if err != nil {
-		return "", err
-	}
-	return gophercloud.ExtractNextURL(s.Links)
-}
-
-// ExtractTenants returns a slice of Tenants contained in a single page of
-// results.
-func ExtractTenants(r pagination.Page) ([]Tenant, error) {
-	var s struct {
-		Tenants []Tenant `json:"tenants"`
-	}
-	err := (r.(TenantPage)).ExtractInto(&s)
-	return s.Tenants, err
-}
-
-type tenantResult struct {
-	gophercloud.Result
-}
-
-// Extract interprets any tenantResults as a Tenant.
-func (r tenantResult) Extract() (*Tenant, error) {
-	var s struct {
-		Tenant *Tenant `json:"tenant"`
-	}
-	err := r.ExtractInto(&s)
-	return s.Tenant, err
-}
-
-// GetResult is the response from a Get request. Call its Extract method to
-// interpret it as a Tenant.
-type GetResult struct {
-	tenantResult
-}
-
-// CreateResult is the response from a Create request. Call its Extract method
-// to interpret it as a Tenant.
-type CreateResult struct {
-	tenantResult
-}
-
-// DeleteResult is the response from a Delete request. Call its ExtractErr method
-// to determine if the call succeeded or failed.
-type DeleteResult struct {
-	gophercloud.ErrResult
-}
-
-// UpdateResult is the response from an Update request. Call its Extract method
-// to interpret it as a Tenant.
-type UpdateResult struct {
-	tenantResult
-}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go
deleted file mode 100644
index 0f026690790..00000000000
--- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package tenants
-
-import "github.com/gophercloud/gophercloud"
-
-func listURL(client *gophercloud.ServiceClient) string {
-	return client.ServiceURL("tenants")
-}
-
-func getURL(client *gophercloud.ServiceClient, tenantID string) string {
-	return client.ServiceURL("tenants", tenantID)
-}
-
-func createURL(client *gophercloud.ServiceClient) string {
-	return client.ServiceURL("tenants")
-}
-
-func deleteURL(client *gophercloud.ServiceClient, tenantID string) string {
-	return client.ServiceURL("tenants", tenantID)
-}
-
-func updateURL(client *gophercloud.ServiceClient, tenantID string) string {
-	return client.ServiceURL("tenants", tenantID)
-}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go
deleted file mode 100644
index 5375eea8726..00000000000
--- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
-Package tokens provides information and interaction with the token API
-resource for the OpenStack Identity service.
-
-For more information, see:
-http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2
-
-Example to Create an Unscoped Token from a Password
-
-	authOpts := gophercloud.AuthOptions{
-		Username: "user",
-		Password: "pass",
-	}
-
-	token, err := tokens.Create(identityClient, authOpts).ExtractToken()
-	if err != nil {
-		panic(err)
-	}
-
-Example to Create a Token from a Tenant ID and Password
-
-	authOpts := gophercloud.AuthOptions{
-		Username: "user",
-		Password: "password",
-		TenantID: "fc394f2ab2df4114bde39905f800dc57",
-	}
-
-	token, err := tokens.Create(identityClient, authOpts).ExtractToken()
-	if err != nil {
-		panic(err)
-	}
-
-Example to Create a Token from a Tenant Name and Password
-
-	authOpts := gophercloud.AuthOptions{
-		Username:   "user",
-		Password:   "password",
-		TenantName: "tenantname",
-	}
-
-	token, err := tokens.Create(identityClient, authOpts).ExtractToken()
-	if err != nil {
-		panic(err)
-	}
-*/
-package tokens
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go
deleted file mode 100644
index 2b64f108cbd..00000000000
--- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go
+++ /dev/null
@@ -1,105 +0,0 @@
-package tokens
-
-import "github.com/gophercloud/gophercloud"
-
-// PasswordCredentialsV2 represents the required options to authenticate
-// with a username and password.
-type PasswordCredentialsV2 struct {
-	Username string `json:"username" required:"true"`
-	Password string `json:"password" required:"true"`
-}
-
-// TokenCredentialsV2 represents the required options to authenticate
-// with a token.
-type TokenCredentialsV2 struct {
-	ID string `json:"id,omitempty" required:"true"`
-}
-
-// AuthOptionsV2 wraps a gophercloud AuthOptions in order to adhere to the
-// AuthOptionsBuilder interface.
-type AuthOptionsV2 struct {
-	PasswordCredentials *PasswordCredentialsV2 `json:"passwordCredentials,omitempty" xor:"TokenCredentials"`
-
-	// The TenantID and TenantName fields are optional for the Identity V2 API.
-	// Some providers allow you to specify a TenantName instead of the TenantID.
-	// Some require both. Your provider's authentication policies will determine
-	// how these fields influence authentication.
-	TenantID   string `json:"tenantId,omitempty"`
-	TenantName string `json:"tenantName,omitempty"`
-
-	// TokenCredentials allows users to authenticate (possibly as another user)
-	// with an authentication token ID.
-	TokenCredentials *TokenCredentialsV2 `json:"token,omitempty" xor:"PasswordCredentials"`
-}
-
-// AuthOptionsBuilder allows extensions to add additional parameters to the
-// token create request.
-type AuthOptionsBuilder interface {
-	// ToTokenCreateMap assembles the Create request body, returning an error
-	// if parameters are missing or inconsistent.
-	ToTokenV2CreateMap() (map[string]interface{}, error)
-}
-
-// AuthOptions are the valid options for OpenStack Identity v2 authentication.
-// For field descriptions, see gophercloud.AuthOptions.
-type AuthOptions struct {
-	IdentityEndpoint string `json:"-"`
-	Username         string `json:"username,omitempty"`
-	Password         string `json:"password,omitempty"`
-	TenantID         string `json:"tenantId,omitempty"`
-	TenantName       string `json:"tenantName,omitempty"`
-	AllowReauth      bool   `json:"-"`
-	TokenID          string
-}
-
-// ToTokenV2CreateMap builds a token request body from the given AuthOptions.
-func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { - v2Opts := AuthOptionsV2{ - TenantID: opts.TenantID, - TenantName: opts.TenantName, - } - - if opts.Password != "" { - v2Opts.PasswordCredentials = &PasswordCredentialsV2{ - Username: opts.Username, - Password: opts.Password, - } - } else { - v2Opts.TokenCredentials = &TokenCredentialsV2{ - ID: opts.TokenID, - } - } - - b, err := gophercloud.BuildRequestBody(v2Opts, "auth") - if err != nil { - return nil, err - } - return b, nil -} - -// Create authenticates to the identity service and attempts to acquire a Token. -// Generally, rather than interact with this call directly, end users should -// call openstack.AuthenticatedClient(), which abstracts all of the gory details -// about navigating service catalogs and such. -func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) (r CreateResult) { - b, err := auth.ToTokenV2CreateMap() - if err != nil { - r.Err = err - return - } - resp, err := client.Post(CreateURL(client), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 203}, - MoreHeaders: map[string]string{"X-Auth-Token": ""}, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// Get validates and retrieves information for user's token. -func Get(client *gophercloud.ServiceClient, token string) (r GetResult) { - resp, err := client.Get(GetURL(client, token), &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200, 203}, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go deleted file mode 100644 index ee5da37f465..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go +++ /dev/null @@ -1,174 +0,0 @@ -package tokens - -import ( - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" -) - -// Token provides only the most basic information related to an authentication -// token. -type Token struct { - // ID provides the primary means of identifying a user to the OpenStack API. - // OpenStack defines this field as an opaque value, so do not depend on its - // content. It is safe, however, to compare for equality. - ID string - - // ExpiresAt provides a timestamp in ISO 8601 format, indicating when the - // authentication token becomes invalid. After this point in time, future - // API requests made using this authentication token will respond with - // errors. Either the caller will need to reauthenticate manually, or more - // preferably, the caller should exploit automatic re-authentication. - // See the AuthOptions structure for more details. - ExpiresAt time.Time - - // Tenant provides information about the tenant to which this token grants - // access. - Tenant tenants.Tenant -} - -// Role is a role for a user. -type Role struct { - Name string `json:"name"` -} - -// User is an OpenStack user. -type User struct { - ID string `json:"id"` - Name string `json:"name"` - UserName string `json:"username"` - Roles []Role `json:"roles"` -} - -// Endpoint represents a single API endpoint offered by a service. -// It provides the public and internal URLs, if supported, along with a region -// specifier, again if provided. -// -// The significance of the Region field will depend upon your provider. 
-// -// In addition, the interface offered by the service will have version -// information associated with it through the VersionId, VersionInfo, and -// VersionList fields, if provided or supported. -// -// In all cases, fields which aren't supported by the provider and service -// combined will assume a zero-value (""). -type Endpoint struct { - TenantID string `json:"tenantId"` - PublicURL string `json:"publicURL"` - InternalURL string `json:"internalURL"` - AdminURL string `json:"adminURL"` - Region string `json:"region"` - VersionID string `json:"versionId"` - VersionInfo string `json:"versionInfo"` - VersionList string `json:"versionList"` -} - -// CatalogEntry provides a type-safe interface to an Identity API V2 service -// catalog listing. -// -// Each class of service, such as cloud DNS or block storage services, will have -// a single CatalogEntry representing it. -// -// Note: when looking for the desired service, try, whenever possible, to key -// off the type field. Otherwise, you'll tie the representation of the service -// to a specific provider. -type CatalogEntry struct { - // Name will contain the provider-specified name for the service. - Name string `json:"name"` - - // Type will contain a type string if OpenStack defines a type for the - // service. Otherwise, for provider-specific services, the provider may assign - // their own type strings. - Type string `json:"type"` - - // Endpoints will let the caller iterate over all the different endpoints that - // may exist for the service. - Endpoints []Endpoint `json:"endpoints"` -} - -// ServiceCatalog provides a view into the service catalog from a previous, -// successful authentication. -type ServiceCatalog struct { - Entries []CatalogEntry -} - -// CreateResult is the response from a Create request. Use ExtractToken() to -// interpret it as a Token, or ExtractServiceCatalog() to interpret it as a -// service catalog. -type CreateResult struct { - gophercloud.Result -} - -// GetResult is the deferred response from a Get call, which is the same with a -// Created token. Use ExtractUser() to interpret it as a User. -type GetResult struct { - CreateResult -} - -// ExtractToken returns the just-created Token from a CreateResult. -func (r CreateResult) ExtractToken() (*Token, error) { - var s struct { - Access struct { - Token struct { - Expires string `json:"expires"` - ID string `json:"id"` - Tenant tenants.Tenant `json:"tenant"` - } `json:"token"` - } `json:"access"` - } - - err := r.ExtractInto(&s) - if err != nil { - return nil, err - } - - expiresTs, err := time.Parse(gophercloud.RFC3339Milli, s.Access.Token.Expires) - if err != nil { - return nil, err - } - - return &Token{ - ID: s.Access.Token.ID, - ExpiresAt: expiresTs, - Tenant: s.Access.Token.Tenant, - }, nil -} - -// ExtractTokenID implements the gophercloud.AuthResult interface. The returned -// string is the same as the ID field of the Token struct returned from -// ExtractToken(). -func (r CreateResult) ExtractTokenID() (string, error) { - var s struct { - Access struct { - Token struct { - ID string `json:"id"` - } `json:"token"` - } `json:"access"` - } - err := r.ExtractInto(&s) - return s.Access.Token.ID, err -} - -// ExtractServiceCatalog returns the ServiceCatalog that was generated along -// with the user's Token. 
-func (r CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) { - var s struct { - Access struct { - Entries []CatalogEntry `json:"serviceCatalog"` - } `json:"access"` - } - err := r.ExtractInto(&s) - return &ServiceCatalog{Entries: s.Access.Entries}, err -} - -// ExtractUser returns the User from a GetResult. -func (r GetResult) ExtractUser() (*User, error) { - var s struct { - Access struct { - User User `json:"user"` - } `json:"access"` - } - err := r.ExtractInto(&s) - return &s.Access.User, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go deleted file mode 100644 index ee0a28f2004..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go +++ /dev/null @@ -1,13 +0,0 @@ -package tokens - -import "github.com/gophercloud/gophercloud" - -// CreateURL generates the URL used to create new Tokens. -func CreateURL(client *gophercloud.ServiceClient) string { - return client.ServiceURL("tokens") -} - -// GetURL generates the URL used to Validate Tokens. -func GetURL(client *gophercloud.ServiceClient, token string) string { - return client.ServiceURL("tokens", token) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/doc.go deleted file mode 100644 index 1f6f807fe06..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/doc.go +++ /dev/null @@ -1,41 +0,0 @@ -/* -Package tokens provides information and interaction with the EC2 token API -resource for the OpenStack Identity service. - -For more information, see: -https://docs.openstack.org/api-ref/identity/v2-ext/ - -Example to Create a Token From an EC2 access and secret keys - - var authOptions tokens.AuthOptionsBuilder - authOptions = &ec2tokens.AuthOptions{ - Access: "a7f1e798b7c2417cba4a02de97dc3cdc", - Secret: "18f4f6761ada4e3795fa5273c30349b9", - } - - token, err := ec2tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to auth a client using EC2 access and secret keys - - client, err := openstack.NewClient("http://localhost:5000/v3") - if err != nil { - panic(err) - } - - var authOptions tokens.AuthOptionsBuilder - authOptions = &ec2tokens.AuthOptions{ - Access: "a7f1e798b7c2417cba4a02de97dc3cdc", - Secret: "18f4f6761ada4e3795fa5273c30349b9", - AllowReauth: true, - } - - err = openstack.AuthenticateV3(client, authOptions, gophercloud.EndpointOpts{}) - if err != nil { - panic(err) - } - -*/ -package ec2tokens diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/requests.go deleted file mode 100644 index 32ba0e621d9..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/requests.go +++ /dev/null @@ -1,377 +0,0 @@ -package ec2tokens - -import ( - "crypto/hmac" - "crypto/sha1" - "crypto/sha256" - "encoding/hex" - "fmt" - "math/rand" - "net/url" - "sort" - "strings" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" -) - -const ( - // EC2CredentialsAwsRequestV4 is a constant, used to generate AWS - // Credential V4. 
-	EC2CredentialsAwsRequestV4 = "aws4_request"
-	// EC2CredentialsHmacSha1V2 is an HMAC SHA1 signature method. Used to
-	// generate AWS Credential V2.
-	EC2CredentialsHmacSha1V2 = "HmacSHA1"
-	// EC2CredentialsHmacSha256V2 is an HMAC SHA256 signature method. Used
-	// to generate AWS Credential V2.
-	EC2CredentialsHmacSha256V2 = "HmacSHA256"
-	// EC2CredentialsAwsHmacV4 is an AWS signature V4 signing method.
-	// More details:
-	// https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
-	EC2CredentialsAwsHmacV4 = "AWS4-HMAC-SHA256"
-	// EC2CredentialsTimestampFormatV4 is an AWS signature V4 timestamp
-	// format.
-	EC2CredentialsTimestampFormatV4 = "20060102T150405Z"
-	// EC2CredentialsDateFormatV4 is an AWS signature V4 date format.
-	EC2CredentialsDateFormatV4 = "20060102"
-)
-
-// AuthOptions represents options for authenticating a user using EC2 credentials.
-type AuthOptions struct {
-	// Access is the EC2 Credential Access ID.
-	Access string `json:"access" required:"true"`
-	// Secret is the EC2 Credential Secret, used to calculate signature.
-	// Not used when a Signature is set.
-	Secret string `json:"-"`
-	// Host is an HTTP request Host header. Used to calculate an AWS
-	// signature V2. For signature V4 set the Host inside Headers map.
-	// Optional.
-	Host string `json:"host"`
-	// Path is an HTTP request path. Optional.
-	Path string `json:"path"`
-	// Verb is an HTTP request method. Optional.
-	Verb string `json:"verb"`
-	// Headers is a map of HTTP request headers. Optional.
-	Headers map[string]string `json:"headers"`
-	// Region is a region name to calculate an AWS signature V4. Optional.
-	Region string `json:"-"`
-	// Service is a service name to calculate an AWS signature V4. Optional.
-	Service string `json:"-"`
-	// Params is a map of GET method parameters. Optional.
-	Params map[string]string `json:"params"`
-	// AllowReauth allows Gophercloud to re-authenticate automatically
-	// if/when your token expires.
-	AllowReauth bool `json:"-"`
-	// Signature can be either a []byte (encoded to base64 automatically) or
-	// a string. You can set the signature explicitly when you already know
-	// it. In this case default Params won't be automatically set. Optional.
-	Signature interface{} `json:"signature"`
-	// BodyHash is an HTTP request body sha256 hash. When nil and Signature
-	// is not set, a random hash is generated. Optional.
-	BodyHash *string `json:"body_hash"`
-	// Timestamp is a timestamp to calculate a V4 signature. Optional.
-	Timestamp *time.Time `json:"-"`
-	// Token is a []byte string (encoded to base64 automatically) which was
-	// signed by an EC2 secret key. Used by S3 tokens for validation only.
-	// Token must be set with a Signature. If a Signature is not provided,
-	// a Token will be generated automatically along with a Signature.
-	Token []byte `json:"token,omitempty"`
-}
-
-// EC2CredentialsBuildCanonicalQueryStringV2 builds a canonical query string
-// for an AWS signature V2.
-// https://github.com/openstack/python-keystoneclient/blob/stable/train/keystoneclient/contrib/ec2/utils.py#L133
-func EC2CredentialsBuildCanonicalQueryStringV2(params map[string]string) string {
-	var keys []string
-	for k := range params {
-		keys = append(keys, k)
-	}
-	sort.Strings(keys)
-
-	var pairs []string
-	for _, k := range keys {
-		pairs = append(pairs, fmt.Sprintf("%s=%s", k, url.QueryEscape(params[k])))
-	}
-
-	return strings.Join(pairs, "&")
-}
-
-// EC2CredentialsBuildStringToSignV2 builds a string to sign an AWS signature
-// V2.
-// https://github.com/openstack/python-keystoneclient/blob/stable/train/keystoneclient/contrib/ec2/utils.py#L148
-func EC2CredentialsBuildStringToSignV2(opts AuthOptions) []byte {
-	stringToSign := strings.Join([]string{
-		opts.Verb,
-		opts.Host,
-		opts.Path,
-	}, "\n")
-
-	return []byte(strings.Join([]string{
-		stringToSign,
-		EC2CredentialsBuildCanonicalQueryStringV2(opts.Params),
-	}, "\n"))
-}
-
-// EC2CredentialsBuildCanonicalQueryStringV4 builds a canonical query string
-// for an AWS signature V4.
-// https://github.com/openstack/python-keystoneclient/blob/stable/train/keystoneclient/contrib/ec2/utils.py#L244
-func EC2CredentialsBuildCanonicalQueryStringV4(verb string, params map[string]string) string {
-	if verb == "POST" {
-		return ""
-	}
-	return EC2CredentialsBuildCanonicalQueryStringV2(params)
-}
-
-// EC2CredentialsBuildCanonicalHeadersV4 builds a canonical string based on
-// "headers" map and "signedHeaders" string parameters.
-// https://github.com/openstack/python-keystoneclient/blob/stable/train/keystoneclient/contrib/ec2/utils.py#L216
-func EC2CredentialsBuildCanonicalHeadersV4(headers map[string]string, signedHeaders string) string {
-	headersLower := make(map[string]string, len(headers))
-	for k, v := range headers {
-		headersLower[strings.ToLower(k)] = v
-	}
-
-	var headersList []string
-	for _, h := range strings.Split(signedHeaders, ";") {
-		if v, ok := headersLower[h]; ok {
-			headersList = append(headersList, h+":"+v)
-		}
-	}
-
-	return strings.Join(headersList, "\n") + "\n"
-}
-
-// EC2CredentialsBuildSignatureKeyV4 builds an HMAC SHA256 signature key based on
-// input parameters.
-// https://github.com/openstack/python-keystoneclient/blob/stable/train/keystoneclient/contrib/ec2/utils.py#L169
-func EC2CredentialsBuildSignatureKeyV4(secret, region, service string, date time.Time) []byte {
-	kDate := sumHMAC256([]byte("AWS4"+secret), []byte(date.Format(EC2CredentialsDateFormatV4)))
-	kRegion := sumHMAC256(kDate, []byte(region))
-	kService := sumHMAC256(kRegion, []byte(service))
-	return sumHMAC256(kService, []byte(EC2CredentialsAwsRequestV4))
-}
-
-// EC2CredentialsBuildStringToSignV4 builds an AWS v4 signature string to sign
-// based on input parameters.
-// https://github.com/openstack/python-keystoneclient/blob/stable/train/keystoneclient/contrib/ec2/utils.py#L251
-func EC2CredentialsBuildStringToSignV4(opts AuthOptions, signedHeaders string, bodyHash string, date time.Time) []byte {
-	scope := strings.Join([]string{
-		date.Format(EC2CredentialsDateFormatV4),
-		opts.Region,
-		opts.Service,
-		EC2CredentialsAwsRequestV4,
-	}, "/")
-
-	canonicalRequest := strings.Join([]string{
-		opts.Verb,
-		opts.Path,
-		EC2CredentialsBuildCanonicalQueryStringV4(opts.Verb, opts.Params),
-		EC2CredentialsBuildCanonicalHeadersV4(opts.Headers, signedHeaders),
-		signedHeaders,
-		bodyHash,
-	}, "\n")
-	hash := sha256.Sum256([]byte(canonicalRequest))
-
-	return []byte(strings.Join([]string{
-		EC2CredentialsAwsHmacV4,
-		date.Format(EC2CredentialsTimestampFormatV4),
-		scope,
-		hex.EncodeToString(hash[:]),
-	}, "\n"))
}
-
-// EC2CredentialsBuildSignatureV4 builds an AWS v4 signature based on input
-// parameters.
-// https://github.com/openstack/python-keystoneclient/blob/stable/train/keystoneclient/contrib/ec2/utils.py#L285..L286
-func EC2CredentialsBuildSignatureV4(key []byte, stringToSign []byte) string {
-	return hex.EncodeToString(sumHMAC256(key, stringToSign))
-}
-
-// EC2CredentialsBuildAuthorizationHeaderV4 builds an AWS v4 Authorization
-// header based on auth parameters, date, and signature.
-func EC2CredentialsBuildAuthorizationHeaderV4(opts AuthOptions, signedHeaders string, signature string, date time.Time) string {
-	return fmt.Sprintf("%s Credential=%s/%s/%s/%s/%s, SignedHeaders=%s, Signature=%s",
-		EC2CredentialsAwsHmacV4,
-		opts.Access,
-		date.Format(EC2CredentialsDateFormatV4),
-		opts.Region,
-		opts.Service,
-		EC2CredentialsAwsRequestV4,
-		signedHeaders,
-		signature)
-}
-
-// ToTokenV3ScopeMap is a dummy method to satisfy the tokens.AuthOptionsBuilder
-// interface.
-func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) {
-	return nil, nil
-}
-
-// ToTokenV3HeadersMap allows AuthOptions to satisfy the AuthOptionsBuilder
-// interface in the v3 tokens package.
-func (opts *AuthOptions) ToTokenV3HeadersMap(map[string]interface{}) (map[string]string, error) {
-	return nil, nil
-}
-
-// CanReauth is a method to satisfy the tokens.AuthOptionsBuilder interface.
-func (opts *AuthOptions) CanReauth() bool {
-	return opts.AllowReauth
-}
-
-// ToTokenV3CreateMap formats an AuthOptions into a create request.
-func (opts *AuthOptions) ToTokenV3CreateMap(map[string]interface{}) (map[string]interface{}, error) {
-	b, err := gophercloud.BuildRequestBody(opts, "credentials")
-	if err != nil {
-		return nil, err
-	}
-
-	if opts.Signature != nil {
-		return b, nil
-	}
-
-	// calculate signature, when it is not set
-	c, _ := b["credentials"].(map[string]interface{})
-	h := interfaceToMap(c, "headers")
-	p := interfaceToMap(c, "params")
-
-	// detect and process a signature v2
-	if v, ok := p["SignatureVersion"]; ok && v == "2" {
-		if _, ok := c["body_hash"]; ok {
-			delete(c, "body_hash")
-		}
-		if _, ok := c["headers"]; ok {
-			delete(c, "headers")
-		}
-		if v, ok := p["SignatureMethod"]; ok {
-			// params is a map of strings
-			strToSign := EC2CredentialsBuildStringToSignV2(*opts)
-			switch v {
-			case EC2CredentialsHmacSha1V2:
-				// keystone uses this method only when HmacSHA256 is not available on the server side
-				// https://github.com/openstack/python-keystoneclient/blob/stable/train/keystoneclient/contrib/ec2/utils.py#L151..L156
-				c["signature"] = sumHMAC1([]byte(opts.Secret), strToSign)
-				return b, nil
-			case EC2CredentialsHmacSha256V2:
-				c["signature"] = sumHMAC256([]byte(opts.Secret), strToSign)
-				return b, nil
-			}
-			return nil, fmt.Errorf("unsupported signature method: %s", v)
-		}
-		return nil, fmt.Errorf("signature method must be provided")
-	} else if ok {
-		return nil, fmt.Errorf("unsupported signature version: %s", v)
-	}
-
-	// it is not a signature v2, but a signature v4
-	date := time.Now().UTC()
-	if opts.Timestamp != nil {
-		date = *opts.Timestamp
-	}
-	if v, _ := c["body_hash"]; v == nil {
-		// when body_hash is not set, generate a random one
-		c["body_hash"] = randomBodyHash()
-	}
-
-	signedHeaders, _ := h["X-Amz-SignedHeaders"]
-
-	stringToSign := EC2CredentialsBuildStringToSignV4(*opts, signedHeaders, c["body_hash"].(string), date)
-	key := EC2CredentialsBuildSignatureKeyV4(opts.Secret, opts.Region, opts.Service, date)
-	c["signature"] = EC2CredentialsBuildSignatureV4(key, stringToSign)
-	h["X-Amz-Date"] = date.Format(EC2CredentialsTimestampFormatV4)
-	h["Authorization"] = EC2CredentialsBuildAuthorizationHeaderV4(*opts, signedHeaders, c["signature"].(string), date)
-
-	// token is only used for S3 tokens validation and will be removed when using EC2 validation
-	c["token"] = stringToSign
-
-	return b, nil
-}
-
-// Create authenticates and generates a new token from EC2 credentials.
-func Create(c *gophercloud.ServiceClient, opts tokens.AuthOptionsBuilder) (r tokens.CreateResult) {
-	b, err := opts.ToTokenV3CreateMap(nil)
-	if err != nil {
-		r.Err = err
-		return
-	}
-
-	// delete the "token" element, since it is used in s3tokens
-	deleteBodyElements(b, "token")
-
-	resp, err := c.Post(ec2tokensURL(c), b, &r.Body, &gophercloud.RequestOpts{
-		MoreHeaders: map[string]string{"X-Auth-Token": ""},
-		OkCodes:     []int{200},
-	})
-	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
-	return
-}
-
-// ValidateS3Token authenticates an S3 request using EC2 credentials. Doesn't
-// generate a new token ID, but returns a tokens.CreateResult.
-func ValidateS3Token(c *gophercloud.ServiceClient, opts tokens.AuthOptionsBuilder) (r tokens.CreateResult) {
-	b, err := opts.ToTokenV3CreateMap(nil)
-	if err != nil {
-		r.Err = err
-		return
-	}
-
-	// delete unused elements, since they are used in ec2tokens only
-	deleteBodyElements(b, "body_hash", "headers", "host", "params", "path", "verb")
-
-	resp, err := c.Post(s3tokensURL(c), b, &r.Body, &gophercloud.RequestOpts{
-		MoreHeaders: map[string]string{"X-Auth-Token": ""},
-		OkCodes:     []int{200},
-	})
-	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
-	return
-}
-
-// The following are small helper functions used to help build the signature.
-
-// sumHMAC1 is a func to implement the HMAC SHA1 signature method.
-func sumHMAC1(key []byte, data []byte) []byte {
-	hash := hmac.New(sha1.New, key)
-	hash.Write(data)
-	return hash.Sum(nil)
-}
-
-// sumHMAC256 is a func to implement the HMAC SHA256 signature method.
-func sumHMAC256(key []byte, data []byte) []byte {
-	hash := hmac.New(sha256.New, key)
-	hash.Write(data)
-	return hash.Sum(nil)
-}
-
-// randomBodyHash is a func to generate a random sha256 hexdigest.
-func randomBodyHash() string { - h := make([]byte, 64) - rand.Read(h) - return hex.EncodeToString(h) -} - -// interfaceToMap is a func used to represent a "credentials" map element as a -// "map[string]string" -func interfaceToMap(c map[string]interface{}, key string) map[string]string { - // convert map[string]interface{} to map[string]string - m := make(map[string]string) - if v, _ := c[key].(map[string]interface{}); v != nil { - for k, v := range v { - m[k] = v.(string) - } - } - - c[key] = m - - return m -} - -// deleteBodyElements deletes map body elements -func deleteBodyElements(b map[string]interface{}, elements ...string) { - if c, ok := b["credentials"].(map[string]interface{}); ok { - for _, k := range elements { - if _, ok := c[k]; ok { - delete(c, k) - } - } - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/urls.go deleted file mode 100644 index 84b33b282ea..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens/urls.go +++ /dev/null @@ -1,11 +0,0 @@ -package ec2tokens - -import "github.com/gophercloud/gophercloud" - -func ec2tokensURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("ec2tokens") -} - -func s3tokensURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("s3tokens") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/doc.go deleted file mode 100644 index c5b0831ca1f..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/doc.go +++ /dev/null @@ -1,123 +0,0 @@ -/* -Package oauth1 enables management of OpenStack OAuth1 tokens and Authentication. 
- -Example to Create an OAuth1 Consumer - - createConsumerOpts := oauth1.CreateConsumerOpts{ - Description: "My consumer", - } - consumer, err := oauth1.CreateConsumer(identityClient, createConsumerOpts).Extract() - if err != nil { - panic(err) - } - - // NOTE: Consumer secret is available only on create response - fmt.Printf("Consumer: %+v\n", consumer) - -Example to Request an unauthorized OAuth1 token - - requestTokenOpts := oauth1.RequestTokenOpts{ - OAuthConsumerKey: consumer.ID, - OAuthConsumerSecret: consumer.Secret, - OAuthSignatureMethod: oauth1.HMACSHA1, - RequestedProjectID: projectID, - } - requestToken, err := oauth1.RequestToken(identityClient, requestTokenOpts).Extract() - if err != nil { - panic(err) - } - - // NOTE: Request token secret is available only on request response - fmt.Printf("Request token: %+v\n", requestToken) - -Example to Authorize an unauthorized OAuth1 token - - authorizeTokenOpts := oauth1.AuthorizeTokenOpts{ - Roles: []oauth1.Role{ - {Name: "member"}, - }, - } - authToken, err := oauth1.AuthorizeToken(identityClient, requestToken.OAuthToken, authorizeTokenOpts).Extract() - if err != nil { - panic(err) - } - - fmt.Printf("Verifier ID of the unauthorized Token: %+v\n", authToken.OAuthVerifier) - -Example to Create an OAuth1 Access Token - - accessTokenOpts := oauth1.CreateAccessTokenOpts{ - OAuthConsumerKey: consumer.ID, - OAuthConsumerSecret: consumer.Secret, - OAuthToken: requestToken.OAuthToken, - OAuthTokenSecret: requestToken.OAuthTokenSecret, - OAuthVerifier: authToken.OAuthVerifier, - OAuthSignatureMethod: oauth1.HMACSHA1, - } - accessToken, err := oauth1.CreateAccessToken(identityClient, accessTokenOpts).Extract() - if err != nil { - panic(err) - } - - // NOTE: Access token secret is available only on create response - fmt.Printf("OAuth1 Access Token: %+v\n", accessToken) - -Example to List User's OAuth1 Access Tokens - - allPages, err := oauth1.ListAccessTokens(identityClient, userID).AllPages() - if err != nil { - panic(err) - } - accessTokens, err := oauth1.ExtractAccessTokens(allPages) - if err != nil { - panic(err) - } - - for _, accessToken := range accessTokens { - fmt.Printf("Access Token: %+v\n", accessToken) - } - -Example to Authenticate a client using OAuth1 method - - client, err := openstack.NewClient("http://localhost:5000/v3") - if err != nil { - panic(err) - } - - authOptions := &oauth1.AuthOptions{ - // consumer token, created earlier - OAuthConsumerKey: consumer.ID, - OAuthConsumerSecret: consumer.Secret, - // access token, created earlier - OAuthToken: accessToken.OAuthToken, - OAuthTokenSecret: accessToken.OAuthTokenSecret, - OAuthSignatureMethod: oauth1.HMACSHA1, - } - err = openstack.AuthenticateV3(client, authOptions, gophercloud.EndpointOpts{}) - if err != nil { - panic(err) - } - -Example to Create a Token using OAuth1 method - - var oauth1Token struct { - tokens.Token - oauth1.TokenExt - } - - createOpts := &oauth1.AuthOptions{ - // consumer token, created earlier - OAuthConsumerKey: consumer.ID, - OAuthConsumerSecret: consumer.Secret, - // access token, created earlier - OAuthToken: accessToken.OAuthToken, - OAuthTokenSecret: accessToken.OAuthTokenSecret, - OAuthSignatureMethod: oauth1.HMACSHA1, - } - err := tokens.Create(identityClient, createOpts).ExtractInto(&oauth1Token) - if err != nil { - panic(err) - } - -*/ -package oauth1 diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/requests.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/requests.go deleted file mode 100644 index 028b5a45bd7..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/requests.go +++ /dev/null @@ -1,587 +0,0 @@ -package oauth1 - -import ( - "crypto/hmac" - "crypto/sha1" - "encoding/base64" - "fmt" - "io/ioutil" - "math/rand" - "net/url" - "sort" - "strconv" - "strings" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" - "github.com/gophercloud/gophercloud/pagination" -) - -// Type SignatureMethod is a OAuth1 SignatureMethod type. -type SignatureMethod string - -const ( - // HMACSHA1 is a recommended OAuth1 signature method. - HMACSHA1 SignatureMethod = "HMAC-SHA1" - - // PLAINTEXT signature method is not recommended to be used in - // production environment. - PLAINTEXT SignatureMethod = "PLAINTEXT" - - // OAuth1TokenContentType is a supported content type for an OAuth1 - // token. - OAuth1TokenContentType = "application/x-www-form-urlencoded" -) - -// AuthOptions represents options for authenticating a user using OAuth1 tokens. -type AuthOptions struct { - // OAuthConsumerKey is the OAuth1 Consumer Key. - OAuthConsumerKey string `q:"oauth_consumer_key" required:"true"` - - // OAuthConsumerSecret is the OAuth1 Consumer Secret. Used to generate - // an OAuth1 request signature. - OAuthConsumerSecret string `required:"true"` - - // OAuthToken is the OAuth1 Request Token. - OAuthToken string `q:"oauth_token" required:"true"` - - // OAuthTokenSecret is the OAuth1 Request Token Secret. Used to generate - // an OAuth1 request signature. - OAuthTokenSecret string `required:"true"` - - // OAuthSignatureMethod is the OAuth1 signature method the Consumer used - // to sign the request. Supported values are "HMAC-SHA1" or "PLAINTEXT". - // "PLAINTEXT" is not recommended for production usage. - OAuthSignatureMethod SignatureMethod `q:"oauth_signature_method" required:"true"` - - // OAuthTimestamp is an OAuth1 request timestamp. If nil, current Unix - // timestamp will be used. - OAuthTimestamp *time.Time - - // OAuthNonce is an OAuth1 request nonce. Nonce must be a random string, - // uniquely generated for each request. Will be generated automatically - // when it is not set. - OAuthNonce string `q:"oauth_nonce"` - - // AllowReauth allows Gophercloud to re-authenticate automatically - // if/when your token expires. - AllowReauth bool -} - -// ToTokenV3HeadersMap builds the headers required for an OAuth1-based create -// request. -func (opts AuthOptions) ToTokenV3HeadersMap(headerOpts map[string]interface{}) (map[string]string, error) { - q, err := buildOAuth1QueryString(opts, opts.OAuthTimestamp, "") - if err != nil { - return nil, err - } - - signatureKeys := []string{opts.OAuthConsumerSecret, opts.OAuthTokenSecret} - - method := headerOpts["method"].(string) - u := headerOpts["url"].(string) - stringToSign := buildStringToSign(method, u, q.Query()) - signature := url.QueryEscape(signString(opts.OAuthSignatureMethod, stringToSign, signatureKeys)) - - authHeader := buildAuthHeader(q.Query(), signature) - - headers := map[string]string{ - "Authorization": authHeader, - "X-Auth-Token": "", - } - - return headers, nil -} - -// ToTokenV3ScopeMap allows AuthOptions to satisfy the tokens.AuthOptionsBuilder -// interface. 
-func (opts AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) {
-	return nil, nil
-}
-
-// CanReauth allows AuthOptions to satisfy the tokens.AuthOptionsBuilder
-// interface.
-func (opts AuthOptions) CanReauth() bool {
-	return opts.AllowReauth
-}
-
-// ToTokenV3CreateMap builds a create request body.
-func (opts AuthOptions) ToTokenV3CreateMap(map[string]interface{}) (map[string]interface{}, error) {
-	// identityReq defines the "identity" portion of an OAuth1-based authentication
-	// create request body.
-	type identityReq struct {
-		Methods []string `json:"methods"`
-		OAuth1  struct{} `json:"oauth1"`
-	}
-
-	// authReq defines the "auth" portion of an OAuth1-based authentication
-	// create request body.
-	type authReq struct {
-		Identity identityReq `json:"identity"`
-	}
-
-	// oauth1Request defines how an OAuth1-based authentication create
-	// request body looks.
-	type oauth1Request struct {
-		Auth authReq `json:"auth"`
-	}
-
-	var req oauth1Request
-
-	req.Auth.Identity.Methods = []string{"oauth1"}
-	return gophercloud.BuildRequestBody(req, "")
-}
-
-// Create authenticates and generates a new OpenStack token from an
-// OAuth1 token.
-func Create(client *gophercloud.ServiceClient, opts tokens.AuthOptionsBuilder) (r tokens.CreateResult) {
-	b, err := opts.ToTokenV3CreateMap(nil)
-	if err != nil {
-		r.Err = err
-		return
-	}
-
-	headerOpts := map[string]interface{}{
-		"method": "POST",
-		"url":    authURL(client),
-	}
-
-	h, err := opts.ToTokenV3HeadersMap(headerOpts)
-	if err != nil {
-		r.Err = err
-		return
-	}
-
-	resp, err := client.Post(authURL(client), b, &r.Body, &gophercloud.RequestOpts{
-		MoreHeaders: h,
-		OkCodes:     []int{201},
-	})
-	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
-	return
-}
-
-// CreateConsumerOptsBuilder allows extensions to add additional parameters to
-// the CreateConsumer request.
-type CreateConsumerOptsBuilder interface {
-	ToOAuth1CreateConsumerMap() (map[string]interface{}, error)
-}
-
-// CreateConsumerOpts provides options used to create a new Consumer.
-type CreateConsumerOpts struct {
-	// Description is the consumer description.
-	Description string `json:"description"`
-}
-
-// ToOAuth1CreateConsumerMap formats a CreateConsumerOpts into a create request.
-func (opts CreateConsumerOpts) ToOAuth1CreateConsumerMap() (map[string]interface{}, error) {
-	return gophercloud.BuildRequestBody(opts, "consumer")
-}
-
-// CreateConsumer creates a new Consumer.
-func CreateConsumer(client *gophercloud.ServiceClient, opts CreateConsumerOptsBuilder) (r CreateConsumerResult) {
-	b, err := opts.ToOAuth1CreateConsumerMap()
-	if err != nil {
-		r.Err = err
-		return
-	}
-	resp, err := client.Post(consumersURL(client), b, &r.Body, &gophercloud.RequestOpts{
-		OkCodes: []int{201},
-	})
-	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
-	return
-}
-
-// DeleteConsumer deletes a Consumer.
-func DeleteConsumer(client *gophercloud.ServiceClient, id string) (r DeleteConsumerResult) {
-	resp, err := client.Delete(consumerURL(client, id), nil)
-	_, r.Header, r.Err = gophercloud.ParseResponse(resp, err)
-	return
-}
-
-// ListConsumers enumerates Consumers.
-func ListConsumers(client *gophercloud.ServiceClient) pagination.Pager {
-	return pagination.NewPager(client, consumersURL(client), func(r pagination.PageResult) pagination.Page {
-		return ConsumersPage{pagination.LinkedPageBase{PageResult: r}}
-	})
-}
-
-// GetConsumer retrieves details on a single Consumer by ID.
-func GetConsumer(client *gophercloud.ServiceClient, id string) (r GetConsumerResult) { - resp, err := client.Get(consumerURL(client, id), &r.Body, nil) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// UpdateConsumerOpts provides options used to update a consumer. -type UpdateConsumerOpts struct { - // Description is the consumer description. - Description string `json:"description"` -} - -// ToOAuth1UpdateConsumerMap formats an UpdateConsumerOpts into a consumer update -// request. -func (opts UpdateConsumerOpts) ToOAuth1UpdateConsumerMap() (map[string]interface{}, error) { - return gophercloud.BuildRequestBody(opts, "consumer") -} - -// UpdateConsumer updates an existing Consumer. -func UpdateConsumer(client *gophercloud.ServiceClient, id string, opts UpdateConsumerOpts) (r UpdateConsumerResult) { - b, err := opts.ToOAuth1UpdateConsumerMap() - if err != nil { - r.Err = err - return - } - resp, err := client.Patch(consumerURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// RequestTokenOptsBuilder allows extensions to add additional parameters to the -// RequestToken request. -type RequestTokenOptsBuilder interface { - ToOAuth1RequestTokenHeaders(string, string) (map[string]string, error) -} - -// RequestTokenOpts provides options used to get a consumer unauthorized -// request token. -type RequestTokenOpts struct { - // OAuthConsumerKey is the OAuth1 Consumer Key. - OAuthConsumerKey string `q:"oauth_consumer_key" required:"true"` - - // OAuthConsumerSecret is the OAuth1 Consumer Secret. Used to generate - // an OAuth1 request signature. - OAuthConsumerSecret string `required:"true"` - - // OAuthSignatureMethod is the OAuth1 signature method the Consumer used - // to sign the request. Supported values are "HMAC-SHA1" or "PLAINTEXT". - // "PLAINTEXT" is not recommended for production usage. - OAuthSignatureMethod SignatureMethod `q:"oauth_signature_method" required:"true"` - - // OAuthTimestamp is an OAuth1 request timestamp. If nil, current Unix - // timestamp will be used. - OAuthTimestamp *time.Time - - // OAuthNonce is an OAuth1 request nonce. Nonce must be a random string, - // uniquely generated for each request. Will be generated automatically - // when it is not set. - OAuthNonce string `q:"oauth_nonce"` - - // RequestedProjectID is a Project ID a consumer user requested an - // access to. - RequestedProjectID string `h:"Requested-Project-Id"` -} - -// ToOAuth1RequestTokenHeaders formats a RequestTokenOpts into a map of request -// headers. -func (opts RequestTokenOpts) ToOAuth1RequestTokenHeaders(method, u string) (map[string]string, error) { - q, err := buildOAuth1QueryString(opts, opts.OAuthTimestamp, "oob") - if err != nil { - return nil, err - } - - h, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, err - } - - signatureKeys := []string{opts.OAuthConsumerSecret} - stringToSign := buildStringToSign(method, u, q.Query()) - signature := url.QueryEscape(signString(opts.OAuthSignatureMethod, stringToSign, signatureKeys)) - authHeader := buildAuthHeader(q.Query(), signature) - - h["Authorization"] = authHeader - - return h, nil -} - -// RequestToken requests an unauthorized OAuth1 Token. 
-func RequestToken(client *gophercloud.ServiceClient, opts RequestTokenOptsBuilder) (r TokenResult) { - h, err := opts.ToOAuth1RequestTokenHeaders("POST", requestTokenURL(client)) - if err != nil { - r.Err = err - return - } - - resp, err := client.Post(requestTokenURL(client), nil, nil, &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{201}, - KeepResponseBody: true, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - if r.Err != nil { - return - } - defer resp.Body.Close() - if v := r.Header.Get("Content-Type"); v != OAuth1TokenContentType { - r.Err = fmt.Errorf("unsupported Content-Type: %q", v) - return - } - r.Body, r.Err = ioutil.ReadAll(resp.Body) - return -} - -// AuthorizeTokenOptsBuilder allows extensions to add additional parameters to -// the AuthorizeToken request. -type AuthorizeTokenOptsBuilder interface { - ToOAuth1AuthorizeTokenMap() (map[string]interface{}, error) -} - -// AuthorizeTokenOpts provides options used to authorize a request token. -type AuthorizeTokenOpts struct { - Roles []Role `json:"roles"` -} - -// Role is a struct representing a role object in a AuthorizeTokenOpts struct. -type Role struct { - ID string `json:"id,omitempty"` - Name string `json:"name,omitempty"` -} - -// ToOAuth1AuthorizeTokenMap formats an AuthorizeTokenOpts into an authorize token -// request. -func (opts AuthorizeTokenOpts) ToOAuth1AuthorizeTokenMap() (map[string]interface{}, error) { - for _, r := range opts.Roles { - if r == (Role{}) { - return nil, fmt.Errorf("role must not be empty") - } - } - return gophercloud.BuildRequestBody(opts, "") -} - -// AuthorizeToken authorizes an unauthorized consumer token. -func AuthorizeToken(client *gophercloud.ServiceClient, id string, opts AuthorizeTokenOptsBuilder) (r AuthorizeTokenResult) { - b, err := opts.ToOAuth1AuthorizeTokenMap() - if err != nil { - r.Err = err - return - } - resp, err := client.Put(authorizeTokenURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ - OkCodes: []int{200}, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// CreateAccessTokenOptsBuilder allows extensions to add additional parameters -// to the CreateAccessToken request. -type CreateAccessTokenOptsBuilder interface { - ToOAuth1CreateAccessTokenHeaders(string, string) (map[string]string, error) -} - -// CreateAccessTokenOpts provides options used to create an OAuth1 token. -type CreateAccessTokenOpts struct { - // OAuthConsumerKey is the OAuth1 Consumer Key. - OAuthConsumerKey string `q:"oauth_consumer_key" required:"true"` - - // OAuthConsumerSecret is the OAuth1 Consumer Secret. Used to generate - // an OAuth1 request signature. - OAuthConsumerSecret string `required:"true"` - - // OAuthToken is the OAuth1 Request Token. - OAuthToken string `q:"oauth_token" required:"true"` - - // OAuthTokenSecret is the OAuth1 Request Token Secret. Used to generate - // an OAuth1 request signature. - OAuthTokenSecret string `required:"true"` - - // OAuthVerifier is the OAuth1 verification code. - OAuthVerifier string `q:"oauth_verifier" required:"true"` - - // OAuthSignatureMethod is the OAuth1 signature method the Consumer used - // to sign the request. Supported values are "HMAC-SHA1" or "PLAINTEXT". - // "PLAINTEXT" is not recommended for production usage. - OAuthSignatureMethod SignatureMethod `q:"oauth_signature_method" required:"true"` - - // OAuthTimestamp is an OAuth1 request timestamp. If nil, current Unix - // timestamp will be used. - OAuthTimestamp *time.Time - - // OAuthNonce is an OAuth1 request nonce. 
Nonce must be a random string, - // uniquely generated for each request. Will be generated automatically - // when it is not set. - OAuthNonce string `q:"oauth_nonce"` -} - -// ToOAuth1CreateAccessTokenHeaders formats a CreateAccessTokenOpts into a map of -// request headers. -func (opts CreateAccessTokenOpts) ToOAuth1CreateAccessTokenHeaders(method, u string) (map[string]string, error) { - q, err := buildOAuth1QueryString(opts, opts.OAuthTimestamp, "") - if err != nil { - return nil, err - } - - signatureKeys := []string{opts.OAuthConsumerSecret, opts.OAuthTokenSecret} - stringToSign := buildStringToSign(method, u, q.Query()) - signature := url.QueryEscape(signString(opts.OAuthSignatureMethod, stringToSign, signatureKeys)) - authHeader := buildAuthHeader(q.Query(), signature) - - headers := map[string]string{ - "Authorization": authHeader, - } - - return headers, nil -} - -// CreateAccessToken creates a new OAuth1 Access Token -func CreateAccessToken(client *gophercloud.ServiceClient, opts CreateAccessTokenOptsBuilder) (r TokenResult) { - h, err := opts.ToOAuth1CreateAccessTokenHeaders("POST", createAccessTokenURL(client)) - if err != nil { - r.Err = err - return - } - - resp, err := client.Post(createAccessTokenURL(client), nil, nil, &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{201}, - KeepResponseBody: true, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - if r.Err != nil { - return - } - defer resp.Body.Close() - if v := r.Header.Get("Content-Type"); v != OAuth1TokenContentType { - r.Err = fmt.Errorf("unsupported Content-Type: %q", v) - return - } - r.Body, r.Err = ioutil.ReadAll(resp.Body) - return -} - -// GetAccessToken retrieves details on a single OAuth1 access token by an ID. -func GetAccessToken(client *gophercloud.ServiceClient, userID string, id string) (r GetAccessTokenResult) { - resp, err := client.Get(userAccessTokenURL(client, userID, id), &r.Body, nil) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// RevokeAccessToken revokes an OAuth1 access token. -func RevokeAccessToken(client *gophercloud.ServiceClient, userID string, id string) (r RevokeAccessTokenResult) { - resp, err := client.Delete(userAccessTokenURL(client, userID, id), nil) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// ListAccessTokens enumerates authorized access tokens. -func ListAccessTokens(client *gophercloud.ServiceClient, userID string) pagination.Pager { - url := userAccessTokensURL(client, userID) - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return AccessTokensPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// ListAccessTokenRoles enumerates authorized access token roles. -func ListAccessTokenRoles(client *gophercloud.ServiceClient, userID string, id string) pagination.Pager { - url := userAccessTokenRolesURL(client, userID, id) - return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { - return AccessTokenRolesPage{pagination.LinkedPageBase{PageResult: r}} - }) -} - -// GetAccessTokenRole retrieves details on a single OAuth1 access token role by -// an ID. 
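Taken together, RequestToken, AuthorizeToken, and CreateAccessToken form the three-legged OAuth1 flow. A sketch under the assumptions that the client is already authenticated and that consumer credentials exist; the role name and project ID are placeholders:

    package example

    import (
    	"fmt"

    	"github.com/gophercloud/gophercloud"
    	"github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1"
    )

    func oauth1Flow(client *gophercloud.ServiceClient, consumerKey, consumerSecret string) error {
    	// 1. Obtain an unauthorized request token for a project.
    	reqToken, err := oauth1.RequestToken(client, oauth1.RequestTokenOpts{
    		OAuthConsumerKey:     consumerKey,
    		OAuthConsumerSecret:  consumerSecret,
    		OAuthSignatureMethod: oauth1.HMACSHA1,
    		RequestedProjectID:   "0fe36e73809d46aeae6705c39077b1b3", // placeholder
    	}).Extract()
    	if err != nil {
    		return err
    	}

    	// 2. Authorize the request token for a role.
    	authorized, err := oauth1.AuthorizeToken(client, reqToken.OAuthToken, oauth1.AuthorizeTokenOpts{
    		Roles: []oauth1.Role{{Name: "member"}}, // placeholder role
    	}).Extract()
    	if err != nil {
    		return err
    	}

    	// 3. Trade the verified request token for an access token.
    	accessToken, err := oauth1.CreateAccessToken(client, oauth1.CreateAccessTokenOpts{
    		OAuthConsumerKey:     consumerKey,
    		OAuthConsumerSecret:  consumerSecret,
    		OAuthToken:           reqToken.OAuthToken,
    		OAuthTokenSecret:     reqToken.OAuthTokenSecret,
    		OAuthVerifier:        authorized.OAuthVerifier,
    		OAuthSignatureMethod: oauth1.HMACSHA1,
    	}).Extract()
    	if err != nil {
    		return err
    	}
    	fmt.Println("access token:", accessToken.OAuthToken)
    	return nil
    }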
-func GetAccessTokenRole(client *gophercloud.ServiceClient, userID string, id string, roleID string) (r GetAccessTokenRoleResult) { - resp, err := client.Get(userAccessTokenRoleURL(client, userID, id, roleID), &r.Body, nil) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// The following are small helper functions used to help build the signature. - -// buildOAuth1QueryString builds a URLEncoded parameters string specific for -// OAuth1-based requests. -func buildOAuth1QueryString(opts interface{}, timestamp *time.Time, callback string) (*url.URL, error) { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return nil, err - } - - query := q.Query() - - if timestamp != nil { - // use provided timestamp - query.Set("oauth_timestamp", strconv.FormatInt(timestamp.Unix(), 10)) - } else { - // use current timestamp - query.Set("oauth_timestamp", strconv.FormatInt(time.Now().UTC().Unix(), 10)) - } - - if query.Get("oauth_nonce") == "" { - // when nonce is not set, generate a random one - query.Set("oauth_nonce", strconv.FormatInt(rand.Int63(), 10)+query.Get("oauth_timestamp")) - } - - if callback != "" { - query.Set("oauth_callback", callback) - } - query.Set("oauth_version", "1.0") - - return &url.URL{RawQuery: query.Encode()}, nil -} - -// buildStringToSign builds a string to be signed. -func buildStringToSign(method string, u string, query url.Values) []byte { - parsedURL, _ := url.Parse(u) - p := parsedURL.Port() - s := parsedURL.Scheme - - // Default scheme port must be stripped - if s == "http" && p == "80" || s == "https" && p == "443" { - parsedURL.Host = strings.TrimSuffix(parsedURL.Host, ":"+p) - } - - // Ensure that URL doesn't contain queries - parsedURL.RawQuery = "" - - v := strings.Join( - []string{method, url.QueryEscape(parsedURL.String()), url.QueryEscape(query.Encode())}, "&") - - return []byte(v) -} - -// signString signs a string using an OAuth1 signature method. -func signString(signatureMethod SignatureMethod, strToSign []byte, signatureKeys []string) string { - var key []byte - for i, k := range signatureKeys { - key = append(key, []byte(url.QueryEscape(k))...) - if i == 0 { - key = append(key, '&') - } - } - - var signedString string - switch signatureMethod { - case PLAINTEXT: - signedString = string(key) - default: - h := hmac.New(sha1.New, key) - h.Write(strToSign) - signedString = base64.StdEncoding.EncodeToString(h.Sum(nil)) - } - - return signedString -} - -// buildAuthHeader generates an OAuth1 Authorization header with a signature -// calculated using an OAuth1 signature method. 
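The signing helpers above reduce to standard RFC 5849 mechanics: percent-encode the method, URL, and sorted query, join them with "&", and HMAC-SHA1 the result with "consumerSecret&tokenSecret" as the key. A standalone sketch with illustrative values only:

    package main

    import (
    	"crypto/hmac"
    	"crypto/sha1"
    	"encoding/base64"
    	"fmt"
    	"net/url"
    	"strings"
    )

    func main() {
    	// Encode() sorts keys, matching the canonical parameter ordering.
    	query := url.Values{}
    	query.Set("oauth_consumer_key", "key")
    	query.Set("oauth_nonce", "12345")
    	query.Set("oauth_timestamp", "1600000000")

    	// Base string: METHOD&url&params, each part percent-encoded.
    	base := strings.Join([]string{
    		"POST",
    		url.QueryEscape("https://identity.example.com/v3/OS-OAUTH1/request_token"),
    		url.QueryEscape(query.Encode()),
    	}, "&")

    	// Key: consumerSecret&tokenSecret (token secret empty for leg one).
    	key := []byte(url.QueryEscape("consumerSecret") + "&" + url.QueryEscape("tokenSecret"))
    	h := hmac.New(sha1.New, key)
    	h.Write([]byte(base))
    	fmt.Println(base64.StdEncoding.EncodeToString(h.Sum(nil)))
    }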
-func buildAuthHeader(query url.Values, signature string) string { - var authHeader []string - var keys []string - for k := range query { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - for _, v := range query[k] { - authHeader = append(authHeader, fmt.Sprintf("%s=%q", k, url.QueryEscape(v))) - } - } - - authHeader = append(authHeader, fmt.Sprintf("oauth_signature=%q", signature)) - - return "OAuth " + strings.Join(authHeader, ", ") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/results.go deleted file mode 100644 index a67f9381d62..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/results.go +++ /dev/null @@ -1,305 +0,0 @@ -package oauth1 - -import ( - "encoding/json" - "net/url" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Consumer represents a delegated authorization request between two -// identities. -type Consumer struct { - ID string `json:"id"` - Secret string `json:"secret"` - Description string `json:"description"` -} - -type consumerResult struct { - gophercloud.Result -} - -// CreateConsumerResult is the response from a Create operation. Call its -// Extract method to interpret it as a Consumer. -type CreateConsumerResult struct { - consumerResult -} - -// UpdateConsumerResult is the response from an Update operation. Call its -// Extract method to interpret it as a Consumer. -type UpdateConsumerResult struct { - consumerResult -} - -// DeleteConsumerResult is the response from a Delete operation. Call its -// ExtractErr to determine if the request succeeded or failed. -type DeleteConsumerResult struct { - gophercloud.ErrResult -} - -// ConsumersPage is a single page of Consumer results. -type ConsumersPage struct { - pagination.LinkedPageBase -} - -// GetConsumerResult is the response from a Get operation. Call its Extract -// method to interpret it as a Consumer. -type GetConsumerResult struct { - consumerResult -} - -// IsEmpty determines whether or not a page of Consumers contains any results. -func (c ConsumersPage) IsEmpty() (bool, error) { - consumers, err := ExtractConsumers(c) - return len(consumers) == 0, err -} - -// NextPageURL extracts the "next" link from the links section of the result. -func (c ConsumersPage) NextPageURL() (string, error) { - var s struct { - Links struct { - Next string `json:"next"` - Previous string `json:"previous"` - } `json:"links"` - } - err := c.ExtractInto(&s) - if err != nil { - return "", err - } - return s.Links.Next, err -} - -// ExtractConsumers returns a slice of Consumers contained in a single page of -// results. -func ExtractConsumers(r pagination.Page) ([]Consumer, error) { - var s struct { - Consumers []Consumer `json:"consumers"` - } - err := (r.(ConsumersPage)).ExtractInto(&s) - return s.Consumers, err -} - -// Extract interprets any consumer result as a Consumer. -func (c consumerResult) Extract() (*Consumer, error) { - var s struct { - Consumer *Consumer `json:"consumer"` - } - err := c.ExtractInto(&s) - return s.Consumer, err -} - -// Token contains an OAuth1 token. -type Token struct { - // OAuthToken is the key value for the oauth token that the Identity API returns. - OAuthToken string `q:"oauth_token"` - // OAuthTokenSecret is the secret value associated with the OAuth Token.
- OAuthTokenSecret string `q:"oauth_token_secret"` - // OAuthExpiresAt is the date and time when an OAuth token expires. - OAuthExpiresAt *time.Time `q:"-"` -} - -// TokenResult is a struct to handle -// "Content-Type: application/x-www-form-urlencoded" response. -type TokenResult struct { - gophercloud.Result - Body []byte -} - -// Extract interprets any OAuth1 token result as a Token. -func (r TokenResult) Extract() (*Token, error) { - if r.Err != nil { - return nil, r.Err - } - - values, err := url.ParseQuery(string(r.Body)) - if err != nil { - return nil, err - } - - token := &Token{ - OAuthToken: values.Get("oauth_token"), - OAuthTokenSecret: values.Get("oauth_token_secret"), - } - - if v := values.Get("oauth_expires_at"); v != "" { - if t, err := time.Parse(gophercloud.RFC3339Milli, v); err != nil { - return nil, err - } else { - token.OAuthExpiresAt = &t - } - } - - return token, nil -} - -// AuthorizedToken contains an OAuth1 authorized token's info. -type AuthorizedToken struct { - // OAuthVerifier is the ID of the token verifier. - OAuthVerifier string `json:"oauth_verifier"` -} - -type AuthorizeTokenResult struct { - gophercloud.Result -} - -// Extract interprets an AuthorizeTokenResult as an AuthorizedToken. -func (r AuthorizeTokenResult) Extract() (*AuthorizedToken, error) { - var s struct { - AuthorizedToken *AuthorizedToken `json:"token"` - } - err := r.ExtractInto(&s) - return s.AuthorizedToken, err -} - -// AccessToken represents an AccessToken response as a struct. -type AccessToken struct { - ID string `json:"id"` - ConsumerID string `json:"consumer_id"` - ProjectID string `json:"project_id"` - AuthorizingUserID string `json:"authorizing_user_id"` - ExpiresAt *time.Time `json:"-"` -} - -func (r *AccessToken) UnmarshalJSON(b []byte) error { - type tmp AccessToken - var s struct { - tmp - ExpiresAt *gophercloud.JSONRFC3339Milli `json:"expires_at"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - *r = AccessToken(s.tmp) - - if s.ExpiresAt != nil { - t := time.Time(*s.ExpiresAt) - r.ExpiresAt = &t - } - - return nil -} - -type GetAccessTokenResult struct { - gophercloud.Result -} - -// Extract interprets any GetAccessTokenResult result as an AccessToken. -func (r GetAccessTokenResult) Extract() (*AccessToken, error) { - var s struct { - AccessToken *AccessToken `json:"access_token"` - } - err := r.ExtractInto(&s) - return s.AccessToken, err -} - -// RevokeAccessTokenResult is the response from a Delete operation. Call its -// ExtractErr to determine if the request succeeded or failed. -type RevokeAccessTokenResult struct { - gophercloud.ErrResult -} - -// AccessTokensPage is a single page of Access Tokens results. -type AccessTokensPage struct { - pagination.LinkedPageBase -} - -// IsEmpty determines whether or not an AccessTokensPage contains any results. -func (r AccessTokensPage) IsEmpty() (bool, error) { - accessTokens, err := ExtractAccessTokens(r) - return len(accessTokens) == 0, err -} - -// NextPageURL extracts the "next" link from the links section of the result. -func (r AccessTokensPage) NextPageURL() (string, error) { - var s struct { - Links struct { - Next string `json:"next"` - Previous string `json:"previous"` - } `json:"links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return s.Links.Next, err -} - -// ExtractAccessTokens returns a slice of AccessTokens contained in a single -// page of results.
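TokenResult.Extract above is plain form-urlencoded parsing. The same decoding in isolation (the body value is illustrative):

    package main

    import (
    	"fmt"
    	"net/url"
    )

    func main() {
    	// A token response body as returned with
    	// Content-Type: application/x-www-form-urlencoded.
    	body := "oauth_token=abc123&oauth_token_secret=s3cr3t"
    	values, err := url.ParseQuery(body)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(values.Get("oauth_token"), values.Get("oauth_token_secret"))
    }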
-func ExtractAccessTokens(r pagination.Page) ([]AccessToken, error) { - var s struct { - AccessTokens []AccessToken `json:"access_tokens"` - } - err := (r.(AccessTokensPage)).ExtractInto(&s) - return s.AccessTokens, err -} - -// AccessTokenRole represents an Access Token Role struct. -type AccessTokenRole struct { - ID string `json:"id"` - Name string `json:"name"` - DomainID string `json:"domain_id"` -} - -// AccessTokenRolesPage is a single page of Access Token roles results. -type AccessTokenRolesPage struct { - pagination.LinkedPageBase -} - -// IsEmpty determines whether or not an AccessTokenRolesPage contains any results. -func (r AccessTokenRolesPage) IsEmpty() (bool, error) { - accessTokenRoles, err := ExtractAccessTokenRoles(r) - return len(accessTokenRoles) == 0, err -} - -// NextPageURL extracts the "next" link from the links section of the result. -func (r AccessTokenRolesPage) NextPageURL() (string, error) { - var s struct { - Links struct { - Next string `json:"next"` - Previous string `json:"previous"` - } `json:"links"` - } - err := r.ExtractInto(&s) - if err != nil { - return "", err - } - return s.Links.Next, err -} - -// ExtractAccessTokenRoles returns a slice of AccessTokenRole contained in a -// single page of results. -func ExtractAccessTokenRoles(r pagination.Page) ([]AccessTokenRole, error) { - var s struct { - AccessTokenRoles []AccessTokenRole `json:"roles"` - } - err := (r.(AccessTokenRolesPage)).ExtractInto(&s) - return s.AccessTokenRoles, err -} - -type GetAccessTokenRoleResult struct { - gophercloud.Result -} - -// Extract interprets any GetAccessTokenRoleResult result as an AccessTokenRole. -func (r GetAccessTokenRoleResult) Extract() (*AccessTokenRole, error) { - var s struct { - AccessTokenRole *AccessTokenRole `json:"role"` - } - err := r.ExtractInto(&s) - return s.AccessTokenRole, err -} - -// OAuth1 is an OAuth1 object, returned in OAuth1 token result. -type OAuth1 struct { - AccessTokenID string `json:"access_token_id"` - ConsumerID string `json:"consumer_id"` -} - -// TokenExt represents an extension of the base token result.
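The pages and extractors above plug into gophercloud's pager. A sketch of walking a user's access tokens, assuming an authenticated identity client (userID is a placeholder):

    package example

    import (
    	"fmt"

    	"github.com/gophercloud/gophercloud"
    	"github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1"
    	"github.com/gophercloud/gophercloud/pagination"
    )

    func printAccessTokens(client *gophercloud.ServiceClient, userID string) error {
    	// EachPage fetches pages lazily; returning true continues iteration.
    	return oauth1.ListAccessTokens(client, userID).EachPage(func(page pagination.Page) (bool, error) {
    		accessTokens, err := oauth1.ExtractAccessTokens(page)
    		if err != nil {
    			return false, err
    		}
    		for _, t := range accessTokens {
    			fmt.Println(t.ID, t.ProjectID)
    		}
    		return true, nil
    	})
    }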
-type TokenExt struct { - OAuth1 OAuth1 `json:"OS-OAUTH1"` -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/urls.go deleted file mode 100644 index 9b51d53b31d..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1/urls.go +++ /dev/null @@ -1,43 +0,0 @@ -package oauth1 - -import "github.com/gophercloud/gophercloud" - -func consumersURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("OS-OAUTH1", "consumers") -} - -func consumerURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("OS-OAUTH1", "consumers", id) -} - -func requestTokenURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("OS-OAUTH1", "request_token") -} - -func authorizeTokenURL(c *gophercloud.ServiceClient, id string) string { - return c.ServiceURL("OS-OAUTH1", "authorize", id) -} - -func createAccessTokenURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("OS-OAUTH1", "access_token") -} - -func userAccessTokensURL(c *gophercloud.ServiceClient, userID string) string { - return c.ServiceURL("users", userID, "OS-OAUTH1", "access_tokens") -} - -func userAccessTokenURL(c *gophercloud.ServiceClient, userID string, id string) string { - return c.ServiceURL("users", userID, "OS-OAUTH1", "access_tokens", id) -} - -func userAccessTokenRolesURL(c *gophercloud.ServiceClient, userID string, id string) string { - return c.ServiceURL("users", userID, "OS-OAUTH1", "access_tokens", id, "roles") -} - -func userAccessTokenRoleURL(c *gophercloud.ServiceClient, userID string, id string, roleID string) string { - return c.ServiceURL("users", userID, "OS-OAUTH1", "access_tokens", id, "roles", roleID) -} - -func authURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("auth", "tokens") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go deleted file mode 100644 index 966e128f128..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go +++ /dev/null @@ -1,108 +0,0 @@ -/* -Package tokens provides information and interaction with the token API -resource for the OpenStack Identity service. 
- -For more information, see: -http://developer.openstack.org/api-ref-identity-v3.html#tokens-v3 - -Example to Create a Token From a Username and Password - - authOptions := tokens.AuthOptions{ - UserID: "username", - Password: "password", - } - - token, err := tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token From a Username, Password, and Domain - - authOptions := tokens.AuthOptions{ - UserID: "username", - Password: "password", - DomainID: "default", - } - - token, err := tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - - authOptions = tokens.AuthOptions{ - UserID: "username", - Password: "password", - DomainName: "default", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token From a Token - - authOptions := tokens.AuthOptions{ - TokenID: "token_id", - } - - token, err := tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Username and Password with Project ID Scope - - scope := tokens.Scope{ - ProjectID: "0fe36e73809d46aeae6705c39077b1b3", - } - - authOptions := tokens.AuthOptions{ - Scope: &scope, - UserID: "username", - Password: "password", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Username and Password with Domain ID Scope - - scope := tokens.Scope{ - DomainID: "default", - } - - authOptions := tokens.AuthOptions{ - Scope: &scope, - UserID: "username", - Password: "password", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -Example to Create a Token from a Username and Password with Project Name Scope - - scope := tokens.Scope{ - ProjectName: "project_name", - DomainID: "default", - } - - authOptions := tokens.AuthOptions{ - Scope: &scope, - UserID: "username", - Password: "password", - } - - token, err = tokens.Create(identityClient, authOptions).ExtractToken() - if err != nil { - panic(err) - } - -*/ -package tokens diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go deleted file mode 100644 index d8c455d1602..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go +++ /dev/null @@ -1,174 +0,0 @@ -package tokens - -import "github.com/gophercloud/gophercloud" - -// Scope allows a created token to be limited to a specific domain or project. -type Scope struct { - ProjectID string - ProjectName string - DomainID string - DomainName string - System bool -} - -// AuthOptionsBuilder provides the ability for extensions to add additional -// parameters to AuthOptions. Extensions must satisfy all required methods. -type AuthOptionsBuilder interface { - // ToTokenV3CreateMap assembles the Create request body, returning an error - // if parameters are missing or inconsistent. - ToTokenV3CreateMap(map[string]interface{}) (map[string]interface{}, error) - ToTokenV3HeadersMap(map[string]interface{}) (map[string]string, error) - ToTokenV3ScopeMap() (map[string]interface{}, error) - CanReauth() bool -} - -// AuthOptions represents options for authenticating a user. 
-type AuthOptions struct { - // IdentityEndpoint specifies the HTTP endpoint that is required to work with - // the Identity API of the appropriate version. While it's ultimately needed - // by all of the identity services, it will often be populated by a - // provider-level function. - IdentityEndpoint string `json:"-"` - - // Username is required if using Identity V2 API. Consult with your provider's - // control panel to discover your account's username. In Identity V3, either - // UserID or a combination of Username and DomainID or DomainName are needed. - Username string `json:"username,omitempty"` - UserID string `json:"id,omitempty"` - - Password string `json:"password,omitempty"` - - // Passcode is used in TOTP authentication method - Passcode string `json:"passcode,omitempty"` - - // At most one of DomainID and DomainName must be provided if using Username - // with Identity V3. Otherwise, either are optional. - DomainID string `json:"-"` - DomainName string `json:"name,omitempty"` - - // AllowReauth should be set to true if you grant permission for Gophercloud - // to cache your credentials in memory, and to allow Gophercloud to attempt - // to re-authenticate automatically if/when your token expires. If you set - // it to false, it will not cache these settings, but re-authentication will - // not be possible. This setting defaults to false. - AllowReauth bool `json:"-"` - - // TokenID allows users to authenticate (possibly as another user) with an - // authentication token ID. - TokenID string `json:"-"` - - // Authentication through Application Credentials requires supplying name, project and secret - // For project we can use TenantID - ApplicationCredentialID string `json:"-"` - ApplicationCredentialName string `json:"-"` - ApplicationCredentialSecret string `json:"-"` - - Scope Scope `json:"-"` -} - -// ToTokenV3CreateMap builds a request body from AuthOptions. -func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { - gophercloudAuthOpts := gophercloud.AuthOptions{ - Username: opts.Username, - UserID: opts.UserID, - Password: opts.Password, - Passcode: opts.Passcode, - DomainID: opts.DomainID, - DomainName: opts.DomainName, - AllowReauth: opts.AllowReauth, - TokenID: opts.TokenID, - ApplicationCredentialID: opts.ApplicationCredentialID, - ApplicationCredentialName: opts.ApplicationCredentialName, - ApplicationCredentialSecret: opts.ApplicationCredentialSecret, - } - - return gophercloudAuthOpts.ToTokenV3CreateMap(scope) -} - -// ToTokenV3ScopeMap builds a scope request body from AuthOptions. -func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { - scope := gophercloud.AuthScope(opts.Scope) - - gophercloudAuthOpts := gophercloud.AuthOptions{ - Scope: &scope, - DomainID: opts.DomainID, - DomainName: opts.DomainName, - } - - return gophercloudAuthOpts.ToTokenV3ScopeMap() -} - -func (opts *AuthOptions) CanReauth() bool { - if opts.Passcode != "" { - // cannot reauth using TOTP passcode - return false - } - - return opts.AllowReauth -} - -// ToTokenV3HeadersMap allows AuthOptions to satisfy the AuthOptionsBuilder -// interface in the v3 tokens package. 
-func (opts *AuthOptions) ToTokenV3HeadersMap(map[string]interface{}) (map[string]string, error) { - return nil, nil -} - -func subjectTokenHeaders(subjectToken string) map[string]string { - return map[string]string{ - "X-Subject-Token": subjectToken, - } -} - -// Create authenticates and either generates a new token, or changes the Scope -// of an existing token. -func Create(c *gophercloud.ServiceClient, opts AuthOptionsBuilder) (r CreateResult) { - scope, err := opts.ToTokenV3ScopeMap() - if err != nil { - r.Err = err - return - } - - b, err := opts.ToTokenV3CreateMap(scope) - if err != nil { - r.Err = err - return - } - - resp, err := c.Post(tokenURL(c), b, &r.Body, &gophercloud.RequestOpts{ - MoreHeaders: map[string]string{"X-Auth-Token": ""}, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// Get validates and retrieves information about another token. -func Get(c *gophercloud.ServiceClient, token string) (r GetResult) { - resp, err := c.Get(tokenURL(c), &r.Body, &gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(token), - OkCodes: []int{200, 203}, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// Validate determines if a specified token is valid or not. -func Validate(c *gophercloud.ServiceClient, token string) (bool, error) { - resp, err := c.Head(tokenURL(c), &gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(token), - OkCodes: []int{200, 204, 404}, - }) - if err != nil { - return false, err - } - - return resp.StatusCode == 200 || resp.StatusCode == 204, nil -} - -// Revoke immediately makes specified token invalid. -func Revoke(c *gophercloud.ServiceClient, token string) (r RevokeResult) { - resp, err := c.Delete(tokenURL(c), &gophercloud.RequestOpts{ - MoreHeaders: subjectTokenHeaders(token), - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go deleted file mode 100644 index f1e17e9f75a..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go +++ /dev/null @@ -1,194 +0,0 @@ -package tokens - -import ( - "time" - - "github.com/gophercloud/gophercloud" -) - -// Endpoint represents a single API endpoint offered by a service. -// It matches either a public, internal or admin URL. -// If supported, it contains a region specifier, again if provided. -// The significance of the Region field will depend upon your provider. -type Endpoint struct { - ID string `json:"id"` - Region string `json:"region"` - RegionID string `json:"region_id"` - Interface string `json:"interface"` - URL string `json:"url"` -} - -// CatalogEntry provides a type-safe interface to an Identity API V3 service -// catalog listing. Each class of service, such as cloud DNS or block storage -// services, could have multiple CatalogEntry representing it (one by interface -// type, e.g public, admin or internal). -// -// Note: when looking for the desired service, try, whenever possible, to key -// off the type field. Otherwise, you'll tie the representation of the service -// to a specific provider. -type CatalogEntry struct { - // Service ID - ID string `json:"id"` - - // Name will contain the provider-specified name for the service. - Name string `json:"name"` - - // Type will contain a type string if OpenStack defines a type for the - // service. 
Otherwise, for provider-specific services, the provider may - // assign their own type strings. - Type string `json:"type"` - - // Endpoints will let the caller iterate over all the different endpoints that - // may exist for the service. - Endpoints []Endpoint `json:"endpoints"` -} - -// ServiceCatalog provides a view into the service catalog from a previous, -// successful authentication. -type ServiceCatalog struct { - Entries []CatalogEntry `json:"catalog"` -} - -// Domain provides information about the domain to which this token grants -// access. -type Domain struct { - ID string `json:"id"` - Name string `json:"name"` -} - -// User represents a user resource that exists in the Identity Service. -type User struct { - Domain Domain `json:"domain"` - ID string `json:"id"` - Name string `json:"name"` -} - -// Role provides information about roles to which User is authorized. -type Role struct { - ID string `json:"id"` - Name string `json:"name"` -} - -// Project provides information about project to which User is authorized. -type Project struct { - Domain Domain `json:"domain"` - ID string `json:"id"` - Name string `json:"name"` -} - -// commonResult is the response from a request. A commonResult has various -// methods which can be used to extract different details about the result. -type commonResult struct { - gophercloud.Result -} - -// Extract is a shortcut for ExtractToken. -// This function is deprecated and still present for backward compatibility. -func (r commonResult) Extract() (*Token, error) { - return r.ExtractToken() -} - -// ExtractToken interprets a commonResult as a Token. -func (r commonResult) ExtractToken() (*Token, error) { - var s Token - err := r.ExtractInto(&s) - if err != nil { - return nil, err - } - - // Parse the token itself from the stored headers. - s.ID = r.Header.Get("X-Subject-Token") - - return &s, err -} - -// ExtractTokenID implements the gophercloud.AuthResult interface. The returned -// string is the same as the ID field of the Token struct returned from -// ExtractToken(). -func (r CreateResult) ExtractTokenID() (string, error) { - return r.Header.Get("X-Subject-Token"), r.Err -} - -// ExtractTokenID implements the gophercloud.AuthResult interface. The returned -// string is the same as the ID field of the Token struct returned from -// ExtractToken(). -func (r GetResult) ExtractTokenID() (string, error) { - return r.Header.Get("X-Subject-Token"), r.Err -} - -// ExtractServiceCatalog returns the ServiceCatalog that was generated along -// with the user's Token. -func (r commonResult) ExtractServiceCatalog() (*ServiceCatalog, error) { - var s ServiceCatalog - err := r.ExtractInto(&s) - return &s, err -} - -// ExtractUser returns the User that is the owner of the Token. -func (r commonResult) ExtractUser() (*User, error) { - var s struct { - User *User `json:"user"` - } - err := r.ExtractInto(&s) - return s.User, err -} - -// ExtractRoles returns Roles to which User is authorized. -func (r commonResult) ExtractRoles() ([]Role, error) { - var s struct { - Roles []Role `json:"roles"` - } - err := r.ExtractInto(&s) - return s.Roles, err -} - -// ExtractProject returns Project to which User is authorized. -func (r commonResult) ExtractProject() (*Project, error) { - var s struct { - Project *Project `json:"project"` - } - err := r.ExtractInto(&s) - return s.Project, err -} - -// ExtractDomain returns Domain to which User is authorized. 
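A sketch of how a CreateResult was typically consumed, extracting both the token and the service catalog from the same response (the credentials are placeholders):

    package example

    import (
    	"fmt"

    	"github.com/gophercloud/gophercloud"
    	"github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
    )

    func issueToken(identityClient *gophercloud.ServiceClient) error {
    	result := tokens.Create(identityClient, &tokens.AuthOptions{
    		UserID:   "username", // placeholder
    		Password: "password", // placeholder
    	})

    	token, err := result.ExtractToken()
    	if err != nil {
    		return err
    	}
    	catalog, err := result.ExtractServiceCatalog()
    	if err != nil {
    		return err
    	}

    	fmt.Println("token expires:", token.ExpiresAt)
    	for _, entry := range catalog.Entries {
    		// Key off Type rather than Name to stay provider-neutral,
    		// as the CatalogEntry comment above advises.
    		fmt.Println(entry.Type, entry.Name)
    	}
    	return nil
    }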
-func (r commonResult) ExtractDomain() (*Domain, error) { - var s struct { - Domain *Domain `json:"domain"` - } - err := r.ExtractInto(&s) - return s.Domain, err -} - -// CreateResult is the response from a Create request. Use ExtractToken() -// to interpret it as a Token, or ExtractServiceCatalog() to interpret it -// as a service catalog. -type CreateResult struct { - commonResult -} - -// GetResult is the response from a Get request. Use ExtractToken() -// to interpret it as a Token, or ExtractServiceCatalog() to interpret it -// as a service catalog. -type GetResult struct { - commonResult -} - -// RevokeResult is the response from a Revoke request. -type RevokeResult struct { - commonResult -} - -// Token is a string that grants a user access to a controlled set of services -// in an OpenStack provider. Each Token is valid for a set length of time. -type Token struct { - // ID is the issued token. - ID string `json:"id"` - - // ExpiresAt is the timestamp at which this token will no longer be accepted. - ExpiresAt time.Time `json:"expires_at"` -} - -func (r commonResult) ExtractInto(v interface{}) error { - return r.ExtractIntoStructPtr(v, "token") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go deleted file mode 100644 index 2f864a31c8b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go +++ /dev/null @@ -1,7 +0,0 @@ -package tokens - -import "github.com/gophercloud/gophercloud" - -func tokenURL(c *gophercloud.ServiceClient) string { - return c.ServiceURL("auth", "tokens") -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go deleted file mode 100644 index 0fa1c083a26..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go +++ /dev/null @@ -1,29 +0,0 @@ -/* -Package accounts contains functionality for working with Object Storage -account resources. An account is the top-level resource in the object storage -hierarchy: containers belong to accounts, objects belong to containers. - -Another way of thinking of an account is as a namespace for all your -resources. It is synonymous with a project or tenant in other OpenStack -services. - -Example to Get an Account - - account, err := accounts.Get(objectStorageClient, nil).Extract() - fmt.Printf("%+v\n", account) - -Example to Update an Account - - metadata := map[string]string{ - "some": "metadata", - } - - updateOpts := accounts.UpdateOpts{ - Metadata: metadata, - } - - updateResult, err := accounts.Update(objectStorageClient, updateOpts).Extract() - fmt.Printf("%+v\n", updateResult) - -*/ -package accounts diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go deleted file mode 100644 index 7c9acf85ff2..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go +++ /dev/null @@ -1,94 +0,0 @@ -package accounts - -import "github.com/gophercloud/gophercloud" - -// GetOptsBuilder allows extensions to add additional headers to the Get -// request.
-type GetOptsBuilder interface { - ToAccountGetMap() (map[string]string, error) -} - -// GetOpts is a structure that contains parameters for getting an account's -// metadata. -type GetOpts struct { - Newest bool `h:"X-Newest"` -} - -// ToAccountGetMap formats a GetOpts into a map[string]string of headers. -func (opts GetOpts) ToAccountGetMap() (map[string]string, error) { - return gophercloud.BuildHeaders(opts) -} - -// Get is a function that retrieves an account's metadata. To extract just the -// custom metadata, call the ExtractMetadata method on the GetResult. To extract -// all the headers that are returned (including the metadata), call the -// Extract method on the GetResult. -func Get(c *gophercloud.ServiceClient, opts GetOptsBuilder) (r GetResult) { - h := make(map[string]string) - if opts != nil { - headers, err := opts.ToAccountGetMap() - if err != nil { - r.Err = err - return - } - for k, v := range headers { - h[k] = v - } - } - resp, err := c.Head(getURL(c), &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{204}, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// UpdateOptsBuilder allows extensions to add additional headers to the Update -// request. -type UpdateOptsBuilder interface { - ToAccountUpdateMap() (map[string]string, error) -} - -// UpdateOpts is a structure that contains parameters for updating, creating, or -// deleting an account's metadata. -type UpdateOpts struct { - Metadata map[string]string - ContentType string `h:"Content-Type"` - DetectContentType bool `h:"X-Detect-Content-Type"` - TempURLKey string `h:"X-Account-Meta-Temp-URL-Key"` - TempURLKey2 string `h:"X-Account-Meta-Temp-URL-Key-2"` -} - -// ToAccountUpdateMap formats an UpdateOpts into a map[string]string of headers. -func (opts UpdateOpts) ToAccountUpdateMap() (map[string]string, error) { - headers, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, err - } - for k, v := range opts.Metadata { - headers["X-Account-Meta-"+k] = v - } - return headers, err -} - -// Update is a function that creates, updates, or deletes an account's metadata. -// To extract the headers returned, call the Extract method on the UpdateResult. -func Update(c *gophercloud.ServiceClient, opts UpdateOptsBuilder) (r UpdateResult) { - h := make(map[string]string) - if opts != nil { - headers, err := opts.ToAccountUpdateMap() - if err != nil { - r.Err = err - return - } - for k, v := range headers { - h[k] = v - } - } - resp, err := c.Request("POST", updateURL(c), &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{201, 202, 204}, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go deleted file mode 100644 index c9b7cb7eb1b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go +++ /dev/null @@ -1,112 +0,0 @@ -package accounts - -import ( - "encoding/json" - "strings" - "time" - - "github.com/gophercloud/gophercloud" -) - -// UpdateResult is returned from a call to the Update function. -type UpdateResult struct { - gophercloud.HeaderResult -} - -// UpdateHeader represents the headers returned in the response from an Update -// request. 
-type UpdateHeader struct { - ContentLength int64 `json:"Content-Length,string"` - ContentType string `json:"Content-Type"` - TransID string `json:"X-Trans-Id"` - Date time.Time `json:"-"` -} - -func (r *UpdateHeader) UnmarshalJSON(b []byte) error { - type tmp UpdateHeader - var s struct { - tmp - Date gophercloud.JSONRFC1123 `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = UpdateHeader(s.tmp) - - r.Date = time.Time(s.Date) - - return err -} - -// Extract will return a struct of headers returned from a call to Update. -func (r UpdateResult) Extract() (*UpdateHeader, error) { - var s UpdateHeader - err := r.ExtractInto(&s) - return &s, err -} - -// GetHeader represents the headers returned in the response from a Get request. -type GetHeader struct { - BytesUsed int64 `json:"X-Account-Bytes-Used,string"` - QuotaBytes *int64 `json:"X-Account-Meta-Quota-Bytes,string"` - ContainerCount int64 `json:"X-Account-Container-Count,string"` - ContentLength int64 `json:"Content-Length,string"` - ObjectCount int64 `json:"X-Account-Object-Count,string"` - ContentType string `json:"Content-Type"` - TransID string `json:"X-Trans-Id"` - TempURLKey string `json:"X-Account-Meta-Temp-URL-Key"` - TempURLKey2 string `json:"X-Account-Meta-Temp-URL-Key-2"` - Date time.Time `json:"-"` -} - -func (r *GetHeader) UnmarshalJSON(b []byte) error { - type tmp GetHeader - var s struct { - tmp - Date string `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = GetHeader(s.tmp) - - if s.Date != "" { - r.Date, err = time.Parse(time.RFC1123, s.Date) - } - - return err -} - -// GetResult is returned from a call to the Get function. -type GetResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Get. -func (r GetResult) Extract() (*GetHeader, error) { - var s GetHeader - err := r.ExtractInto(&s) - return &s, err -} - -// ExtractMetadata is a function that takes a GetResult (of type *http.Response) -// and returns the custom metadata associated with the account.
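A sketch of the account metadata round trip through the Get and Update calls above, assuming an authenticated object storage client:

    package example

    import (
    	"fmt"

    	"github.com/gophercloud/gophercloud"
    	"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts"
    )

    func touchAccountMetadata(objectStorageClient *gophercloud.ServiceClient) error {
    	// Read the current custom metadata (X-Account-Meta-* headers).
    	getResult := accounts.Get(objectStorageClient, accounts.GetOpts{Newest: true})
    	metadata, err := getResult.ExtractMetadata()
    	if err != nil {
    		return err
    	}
    	fmt.Println("current metadata:", metadata)

    	// Write a metadata key back; values here are placeholders.
    	_, err = accounts.Update(objectStorageClient, accounts.UpdateOpts{
    		Metadata: map[string]string{"some": "metadata"},
    	}).Extract()
    	return err
    }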
-func (r GetResult) ExtractMetadata() (map[string]string, error) { - if r.Err != nil { - return nil, r.Err - } - - metadata := make(map[string]string) - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Account-Meta-") { - key := strings.TrimPrefix(k, "X-Account-Meta-") - metadata[key] = v[0] - } - } - return metadata, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go deleted file mode 100644 index 71540b1daf3..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go +++ /dev/null @@ -1,11 +0,0 @@ -package accounts - -import "github.com/gophercloud/gophercloud" - -func getURL(c *gophercloud.ServiceClient) string { - return c.Endpoint -} - -func updateURL(c *gophercloud.ServiceClient) string { - return getURL(c) -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go deleted file mode 100644 index ffc4f05297b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go +++ /dev/null @@ -1,95 +0,0 @@ -/* -Package containers contains functionality for working with Object Storage -container resources. A container serves as a logical namespace for objects -that are placed inside it - an object with the same name in two different -containers represents two different objects. - -In addition to containing objects, you can also use the container to control -access to objects by using an access control list (ACL). - -Note: When referencing the Object Storage API docs, some of the API actions -are listed under "accounts" rather than "containers". This was an intentional -design in Gophercloud to make some container actions feel more natural. 
- -Example to List Containers - - listOpts := containers.ListOpts{ - Full: true, - } - - allPages, err := containers.List(objectStorageClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allContainers, err := containers.ExtractInfo(allPages) - if err != nil { - panic(err) - } - - for _, container := range allContainers { - fmt.Printf("%+v\n", container) - } - -Example to List Only Container Names - - listOpts := containers.ListOpts{ - Full: false, - } - - allPages, err := containers.List(objectStorageClient, listOpts).AllPages() - if err != nil { - panic(err) - } - - allContainers, err := containers.ExtractNames(allPages) - if err != nil { - panic(err) - } - - for _, container := range allContainers { - fmt.Printf("%+v\n", container) - } - -Example to Create a Container - - createOpts := containers.CreateOpts{ - ContentType: "application/json", - Metadata: map[string]string{ - "foo": "bar", - }, - } - - container, err := containers.Create(objectStorageClient, createOpts).Extract() - if err != nil { - panic(err) - } - -Example to Update a Container - - containerName := "my_container" - - updateOpts := containers.UpdateOpts{ - Metadata: map[string]string{ - "bar": "baz", - }, - RemoveMetadata: []string{ - "foo", - }, - } - - container, err := containers.Update(objectStorageClient, containerName, updateOpts).Extract() - if err != nil { - panic(err) - } - -Example to Delete a Container - - containerName := "my_container" - - container, err := containers.Delete(objectStorageClient, containerName).Extract() - if err != nil { - panic(err) - } -*/ -package containers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go deleted file mode 100644 index b8f169345d6..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go +++ /dev/null @@ -1,250 +0,0 @@ -package containers - -import ( - "net/url" - "strings" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the List -// request. -type ListOptsBuilder interface { - ToContainerListParams() (bool, string, error) -} - -// ListOpts is a structure that holds options for listing containers. -type ListOpts struct { - Full bool - Limit int `q:"limit"` - Marker string `q:"marker"` - EndMarker string `q:"end_marker"` - Format string `q:"format"` - Prefix string `q:"prefix"` - Delimiter string `q:"delimiter"` -} - -// ToContainerListParams formats a ListOpts into a query string and boolean -// representing whether to list complete information for each container. -func (opts ListOpts) ToContainerListParams() (bool, string, error) { - q, err := gophercloud.BuildQueryString(opts) - return opts.Full, q.String(), err -} - -// List is a function that retrieves containers associated with the account as -// well as account metadata. It returns a pager which can be iterated with the -// EachPage function. 
-func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { - headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"} - - url := listURL(c) - if opts != nil { - full, query, err := opts.ToContainerListParams() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - - if full { - headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"} - } - } - - pager := pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - p := ContainerPage{pagination.MarkerPageBase{PageResult: r}} - p.MarkerPageBase.Owner = p - return p - }) - pager.Headers = headers - return pager -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToContainerCreateMap() (map[string]string, error) -} - -// CreateOpts is a structure that holds parameters for creating a container. -type CreateOpts struct { - Metadata map[string]string - ContainerRead string `h:"X-Container-Read"` - ContainerSyncTo string `h:"X-Container-Sync-To"` - ContainerSyncKey string `h:"X-Container-Sync-Key"` - ContainerWrite string `h:"X-Container-Write"` - ContentType string `h:"Content-Type"` - DetectContentType bool `h:"X-Detect-Content-Type"` - IfNoneMatch string `h:"If-None-Match"` - VersionsLocation string `h:"X-Versions-Location"` - HistoryLocation string `h:"X-History-Location"` - TempURLKey string `h:"X-Container-Meta-Temp-URL-Key"` - TempURLKey2 string `h:"X-Container-Meta-Temp-URL-Key-2"` -} - -// ToContainerCreateMap formats a CreateOpts into a map of headers. -func (opts CreateOpts) ToContainerCreateMap() (map[string]string, error) { - h, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, err - } - for k, v := range opts.Metadata { - h["X-Container-Meta-"+k] = v - } - return h, nil -} - -// Create is a function that creates a new container. -func Create(c *gophercloud.ServiceClient, containerName string, opts CreateOptsBuilder) (r CreateResult) { - h := make(map[string]string) - if opts != nil { - headers, err := opts.ToContainerCreateMap() - if err != nil { - r.Err = err - return - } - for k, v := range headers { - h[k] = v - } - } - resp, err := c.Request("PUT", createURL(c, url.QueryEscape(containerName)), &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{201, 202, 204}, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// BulkDelete is a function that bulk deletes containers. -func BulkDelete(c *gophercloud.ServiceClient, containers []string) (r BulkDeleteResult) { - // urlencode container names to be on the safe side - // https://github.com/openstack/swift/blob/stable/train/swift/common/middleware/bulk.py#L160 - // https://github.com/openstack/swift/blob/stable/train/swift/common/swob.py#L302 - encodedContainers := make([]string, len(containers)) - for i, v := range containers { - encodedContainers[i] = url.QueryEscape(v) - } - b := strings.NewReader(strings.Join(encodedContainers, "\n") + "\n") - resp, err := c.Post(bulkDeleteURL(c), b, &r.Body, &gophercloud.RequestOpts{ - MoreHeaders: map[string]string{ - "Accept": "application/json", - "Content-Type": "text/plain", - }, - OkCodes: []int{200}, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// Delete is a function that deletes a container. 
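A sketch of streaming container names through the pager returned by List, using the name-only listing path (Full: false) and the ExtractNames helper defined in results.go below; the object storage client is assumed:

    package example

    import (
    	"fmt"

    	"github.com/gophercloud/gophercloud"
    	"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers"
    	"github.com/gophercloud/gophercloud/pagination"
    )

    func printContainerNames(objectStorageClient *gophercloud.ServiceClient) error {
    	pager := containers.List(objectStorageClient, containers.ListOpts{Full: false, Limit: 50})
    	// Each page is fetched on demand; return true to keep paging.
    	return pager.EachPage(func(page pagination.Page) (bool, error) {
    		names, err := containers.ExtractNames(page)
    		if err != nil {
    			return false, err
    		}
    		for _, name := range names {
    			fmt.Println(name)
    		}
    		return true, nil
    	})
    }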
-func Delete(c *gophercloud.ServiceClient, containerName string) (r DeleteResult) { - resp, err := c.Delete(deleteURL(c, url.QueryEscape(containerName)), nil) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToContainerUpdateMap() (map[string]string, error) -} - -// UpdateOpts is a structure that holds parameters for updating, creating, or -// deleting a container's metadata. -type UpdateOpts struct { - Metadata map[string]string - RemoveMetadata []string - ContainerRead string `h:"X-Container-Read"` - ContainerSyncTo string `h:"X-Container-Sync-To"` - ContainerSyncKey string `h:"X-Container-Sync-Key"` - ContainerWrite string `h:"X-Container-Write"` - ContentType string `h:"Content-Type"` - DetectContentType bool `h:"X-Detect-Content-Type"` - RemoveVersionsLocation string `h:"X-Remove-Versions-Location"` - VersionsLocation string `h:"X-Versions-Location"` - RemoveHistoryLocation string `h:"X-Remove-History-Location"` - HistoryLocation string `h:"X-History-Location"` - TempURLKey string `h:"X-Container-Meta-Temp-URL-Key"` - TempURLKey2 string `h:"X-Container-Meta-Temp-URL-Key-2"` -} - -// ToContainerUpdateMap formats an UpdateOpts into a map of headers. -func (opts UpdateOpts) ToContainerUpdateMap() (map[string]string, error) { - h, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, err - } - - for k, v := range opts.Metadata { - h["X-Container-Meta-"+k] = v - } - - for _, k := range opts.RemoveMetadata { - h["X-Remove-Container-Meta-"+k] = "remove" - } - - return h, nil -} - -// Update is a function that creates, updates, or deletes a container's -// metadata. -func Update(c *gophercloud.ServiceClient, containerName string, opts UpdateOptsBuilder) (r UpdateResult) { - h := make(map[string]string) - if opts != nil { - headers, err := opts.ToContainerUpdateMap() - if err != nil { - r.Err = err - return - } - - for k, v := range headers { - h[k] = v - } - } - resp, err := c.Request("POST", updateURL(c, url.QueryEscape(containerName)), &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{201, 202, 204}, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// GetOptsBuilder allows extensions to add additional parameters to the Get -// request. -type GetOptsBuilder interface { - ToContainerGetMap() (map[string]string, error) -} - -// GetOpts is a structure that holds options for getting a container's metadata. -type GetOpts struct { - Newest bool `h:"X-Newest"` -} - -// ToContainerGetMap formats a GetOpts into a map of headers. -func (opts GetOpts) ToContainerGetMap() (map[string]string, error) { - return gophercloud.BuildHeaders(opts) -} - -// Get is a function that retrieves the metadata of a container. To extract just -// the custom metadata, pass the GetResult response to the ExtractMetadata -// function.
-func Get(c *gophercloud.ServiceClient, containerName string, opts GetOptsBuilder) (r GetResult) { - h := make(map[string]string) - if opts != nil { - headers, err := opts.ToContainerGetMap() - if err != nil { - r.Err = err - return - } - - for k, v := range headers { - h[k] = v - } - } - resp, err := c.Head(getURL(c, url.QueryEscape(containerName)), &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{200, 204}, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go deleted file mode 100644 index 14e390541f3..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go +++ /dev/null @@ -1,301 +0,0 @@ -package containers - -import ( - "encoding/json" - "fmt" - "strings" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Container represents a container resource. -type Container struct { - // The total number of bytes stored in the container. - Bytes int64 `json:"bytes"` - - // The total number of objects stored in the container. - Count int64 `json:"count"` - - // The name of the container. - Name string `json:"name"` -} - -// ContainerPage is the page returned by a pager when traversing over a -// collection of containers. -type ContainerPage struct { - pagination.MarkerPageBase -} - -//IsEmpty returns true if a ListResult contains no container names. -func (r ContainerPage) IsEmpty() (bool, error) { - names, err := ExtractNames(r) - return len(names) == 0, err -} - -// LastMarker returns the last container name in a ListResult. -func (r ContainerPage) LastMarker() (string, error) { - names, err := ExtractNames(r) - if err != nil { - return "", err - } - if len(names) == 0 { - return "", nil - } - return names[len(names)-1], nil -} - -// ExtractInfo is a function that takes a ListResult and returns the -// containers' information. -func ExtractInfo(r pagination.Page) ([]Container, error) { - var s []Container - err := (r.(ContainerPage)).ExtractInto(&s) - return s, err -} - -// ExtractNames is a function that takes a ListResult and returns the -// containers' names. -func ExtractNames(page pagination.Page) ([]string, error) { - casted := page.(ContainerPage) - ct := casted.Header.Get("Content-Type") - - switch { - case strings.HasPrefix(ct, "application/json"): - parsed, err := ExtractInfo(page) - if err != nil { - return nil, err - } - - names := make([]string, 0, len(parsed)) - for _, container := range parsed { - names = append(names, container.Name) - } - return names, nil - case strings.HasPrefix(ct, "text/plain"): - names := make([]string, 0, 50) - - body := string(page.(ContainerPage).Body.([]uint8)) - for _, name := range strings.Split(body, "\n") { - if len(name) > 0 { - names = append(names, name) - } - } - - return names, nil - default: - return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct) - } -} - -// GetHeader represents the headers returned in the response from a Get request. 
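Because ContainerPage implements marker paging, container names can be streamed one page at a time rather than buffered; a sketch, again assuming an authenticated client:

    err := containers.List(objectStorageClient, nil).EachPage(func(page pagination.Page) (bool, error) {
        names, err := containers.ExtractNames(page)
        if err != nil {
            return false, err
        }
        for _, name := range names {
            fmt.Println(name)
        }
        return true, nil // returning false stops pagination early
    })
    if err != nil {
        panic(err)
    }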
-type GetHeader struct { - AcceptRanges string `json:"Accept-Ranges"` - BytesUsed int64 `json:"X-Container-Bytes-Used,string"` - ContentLength int64 `json:"Content-Length,string"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - ObjectCount int64 `json:"X-Container-Object-Count,string"` - Read []string `json:"-"` - TransID string `json:"X-Trans-Id"` - VersionsLocation string `json:"X-Versions-Location"` - HistoryLocation string `json:"X-History-Location"` - Write []string `json:"-"` - StoragePolicy string `json:"X-Storage-Policy"` - TempURLKey string `json:"X-Container-Meta-Temp-URL-Key"` - TempURLKey2 string `json:"X-Container-Meta-Temp-URL-Key-2"` -} - -func (r *GetHeader) UnmarshalJSON(b []byte) error { - type tmp GetHeader - var s struct { - tmp - Write string `json:"X-Container-Write"` - Read string `json:"X-Container-Read"` - Date gophercloud.JSONRFC1123 `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = GetHeader(s.tmp) - - r.Read = strings.Split(s.Read, ",") - r.Write = strings.Split(s.Write, ",") - - r.Date = time.Time(s.Date) - - return err -} - -// GetResult represents the result of a get operation. -type GetResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Get. -func (r GetResult) Extract() (*GetHeader, error) { - var s GetHeader - err := r.ExtractInto(&s) - return &s, err -} - -// ExtractMetadata is a function that takes a GetResult (of type *http.Response) -// and returns the custom metadata associated with the container. -func (r GetResult) ExtractMetadata() (map[string]string, error) { - if r.Err != nil { - return nil, r.Err - } - metadata := make(map[string]string) - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Container-Meta-") { - key := strings.TrimPrefix(k, "X-Container-Meta-") - metadata[key] = v[0] - } - } - return metadata, nil -} - -// CreateHeader represents the headers returned in the response from a Create -// request. -type CreateHeader struct { - ContentLength int64 `json:"Content-Length,string"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - TransID string `json:"X-Trans-Id"` -} - -func (r *CreateHeader) UnmarshalJSON(b []byte) error { - type tmp CreateHeader - var s struct { - tmp - Date gophercloud.JSONRFC1123 `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = CreateHeader(s.tmp) - - r.Date = time.Time(s.Date) - - return err -} - -// CreateResult represents the result of a create operation. To extract the -// the headers from the HTTP response, call its Extract method. -type CreateResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Create. -// To extract the headers from the HTTP response, call its Extract method. -func (r CreateResult) Extract() (*CreateHeader, error) { - var s CreateHeader - err := r.ExtractInto(&s) - return &s, err -} - -// UpdateHeader represents the headers returned in the response from a Update -// request. 
-type UpdateHeader struct { - ContentLength int64 `json:"Content-Length,string"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - TransID string `json:"X-Trans-Id"` -} - -func (r *UpdateHeader) UnmarshalJSON(b []byte) error { - type tmp UpdateHeader - var s struct { - tmp - Date gophercloud.JSONRFC1123 `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = UpdateHeader(s.tmp) - - r.Date = time.Time(s.Date) - - return err -} - -// UpdateResult represents the result of an update operation. To extract the -// the headers from the HTTP response, call its Extract method. -type UpdateResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Update. -func (r UpdateResult) Extract() (*UpdateHeader, error) { - var s UpdateHeader - err := r.ExtractInto(&s) - return &s, err -} - -// DeleteHeader represents the headers returned in the response from a Delete -// request. -type DeleteHeader struct { - ContentLength int64 `json:"Content-Length,string"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - TransID string `json:"X-Trans-Id"` -} - -func (r *DeleteHeader) UnmarshalJSON(b []byte) error { - type tmp DeleteHeader - var s struct { - tmp - Date gophercloud.JSONRFC1123 `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = DeleteHeader(s.tmp) - - r.Date = time.Time(s.Date) - - return err -} - -// DeleteResult represents the result of a delete operation. To extract the -// headers from the HTTP response, call its Extract method. -type DeleteResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Delete. -func (r DeleteResult) Extract() (*DeleteHeader, error) { - var s DeleteHeader - err := r.ExtractInto(&s) - return &s, err -} - -type BulkDeleteResponse struct { - ResponseStatus string `json:"Response Status"` - ResponseBody string `json:"Response Body"` - Errors [][]string `json:"Errors"` - NumberDeleted int `json:"Number Deleted"` - NumberNotFound int `json:"Number Not Found"` -} - -// BulkDeleteResult represents the result of a bulk delete operation. To extract -// the response object from the HTTP response, call its Extract method. -type BulkDeleteResult struct { - gophercloud.Result -} - -// Extract will return a BulkDeleteResponse struct returned from a BulkDelete -// call. 
-func (r BulkDeleteResult) Extract() (*BulkDeleteResponse, error) { - var s BulkDeleteResponse - err := r.ExtractInto(&s) - return &s, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go deleted file mode 100644 index 0044a5e206b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go +++ /dev/null @@ -1,27 +0,0 @@ -package containers - -import "github.com/gophercloud/gophercloud" - -func listURL(c *gophercloud.ServiceClient) string { - return c.Endpoint -} - -func createURL(c *gophercloud.ServiceClient, container string) string { - return c.ServiceURL(container) -} - -func getURL(c *gophercloud.ServiceClient, container string) string { - return createURL(c, container) -} - -func deleteURL(c *gophercloud.ServiceClient, container string) string { - return createURL(c, container) -} - -func updateURL(c *gophercloud.ServiceClient, container string) string { - return createURL(c, container) -} - -func bulkDeleteURL(c *gophercloud.ServiceClient) string { - return c.Endpoint + "?bulk-delete=true" -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go deleted file mode 100644 index 7714460aadc..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go +++ /dev/null @@ -1,110 +0,0 @@ -/* -Package objects contains functionality for working with Object Storage -object resources. An object is a resource that represents and contains data -- such as documents, images, and so on. You can also store custom metadata -with an object. - -Note: When referencing the Object Storage API docs, some of the API actions -are listed under "containers" rather than "objects". This was an intentional -design in Gophercloud to make some object actions feel more natural. 
-
-Example to List Objects
-
-    containerName := "my_container"
-
-    listOpts := objects.ListOpts{
-        Full: true,
-    }
-
-    allPages, err := objects.List(objectStorageClient, containerName, listOpts).AllPages()
-    if err != nil {
-        panic(err)
-    }
-
-    allObjects, err := objects.ExtractInfo(allPages)
-    if err != nil {
-        panic(err)
-    }
-
-    for _, object := range allObjects {
-        fmt.Printf("%+v\n", object)
-    }
-
-Example to List Object Names
-
-    containerName := "my_container"
-
-    listOpts := objects.ListOpts{
-        Full: false,
-    }
-
-    allPages, err := objects.List(objectStorageClient, containerName, listOpts).AllPages()
-    if err != nil {
-        panic(err)
-    }
-
-    allObjects, err := objects.ExtractNames(allPages)
-    if err != nil {
-        panic(err)
-    }
-
-    for _, object := range allObjects {
-        fmt.Printf("%+v\n", object)
-    }
-
-Example to Create an Object
-
-    content := "some object content"
-    objectName := "my_object"
-    containerName := "my_container"
-
-    createOpts := objects.CreateOpts{
-        ContentType: "text/plain",
-        Content:     strings.NewReader(content),
-    }
-
-    object, err := objects.Create(objectStorageClient, containerName, objectName, createOpts).Extract()
-    if err != nil {
-        panic(err)
-    }
-
-Example to Copy an Object
-
-    objectName := "my_object"
-    containerName := "my_container"
-
-    copyOpts := objects.CopyOpts{
-        Destination: "/newContainer/newObject",
-    }
-
-    object, err := objects.Copy(objectStorageClient, containerName, objectName, copyOpts).Extract()
-    if err != nil {
-        panic(err)
-    }
-
-Example to Delete an Object
-
-    objectName := "my_object"
-    containerName := "my_container"
-
-    object, err := objects.Delete(objectStorageClient, containerName, objectName, nil).Extract()
-    if err != nil {
-        panic(err)
-    }
-
-Example to Download an Object's Data
-
-    objectName := "my_object"
-    containerName := "my_container"
-
-    object := objects.Download(objectStorageClient, containerName, objectName, nil)
-    if object.Err != nil {
-        panic(object.Err)
-    }
-    // if the "ExtractContent" method is not called, the HTTP connection will remain in use
-    content, err := object.ExtractContent()
-    if err != nil {
-        panic(err)
-    }
-*/
-package objects
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go
deleted file mode 100644
index 5c4ae44d317..00000000000
--- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package objects
-
-import "github.com/gophercloud/gophercloud"
-
-// ErrWrongChecksum is the error when the checksum generated for an object
-// doesn't match the ETag header.
-type ErrWrongChecksum struct { - gophercloud.BaseError -} - -func (e ErrWrongChecksum) Error() string { - return "Local checksum does not match API ETag header" -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go deleted file mode 100644 index c11241cc2f8..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go +++ /dev/null @@ -1,527 +0,0 @@ -package objects - -import ( - "bytes" - "crypto/hmac" - "crypto/md5" - "crypto/sha1" - "fmt" - "io" - "io/ioutil" - "net/url" - "strings" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts" - "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" - "github.com/gophercloud/gophercloud/pagination" -) - -// ListOptsBuilder allows extensions to add additional parameters to the List -// request. -type ListOptsBuilder interface { - ToObjectListParams() (bool, string, error) -} - -// ListOpts is a structure that holds parameters for listing objects. -type ListOpts struct { - // Full is a true/false value that represents the amount of object information - // returned. If Full is set to true, then the content-type, number of bytes, - // hash date last modified, and name are returned. If set to false or not set, - // then only the object names are returned. - Full bool - Limit int `q:"limit"` - Marker string `q:"marker"` - EndMarker string `q:"end_marker"` - Format string `q:"format"` - Prefix string `q:"prefix"` - Delimiter string `q:"delimiter"` - Path string `q:"path"` -} - -// ToObjectListParams formats a ListOpts into a query string and boolean -// representing whether to list complete information for each object. -func (opts ListOpts) ToObjectListParams() (bool, string, error) { - q, err := gophercloud.BuildQueryString(opts) - return opts.Full, q.String(), err -} - -// List is a function that retrieves all objects in a container. It also returns -// the details for the container. To extract only the object information or names, -// pass the ListResult response to the ExtractInfo or ExtractNames function, -// respectively. -func List(c *gophercloud.ServiceClient, containerName string, opts ListOptsBuilder) pagination.Pager { - headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"} - - url := listURL(c, url.QueryEscape(containerName)) - if opts != nil { - full, query, err := opts.ToObjectListParams() - if err != nil { - return pagination.Pager{Err: err} - } - url += query - - if full { - headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"} - } - } - - pager := pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { - p := ObjectPage{pagination.MarkerPageBase{PageResult: r}} - p.MarkerPageBase.Owner = p - return p - }) - pager.Headers = headers - return pager -} - -// DownloadOptsBuilder allows extensions to add additional parameters to the -// Download request. -type DownloadOptsBuilder interface { - ToObjectDownloadParams() (map[string]string, string, error) -} - -// DownloadOpts is a structure that holds parameters for downloading an object. 
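In the List call above, Full toggles between name-only (text/plain) and detailed (JSON) listings, while Prefix and Delimiter give directory-style filtering; an illustrative sketch with the usual hypothetical client:

    listOpts := objects.ListOpts{
        Prefix:    "logs/",
        Delimiter: "/",
    }

    allPages, err := objects.List(objectStorageClient, "my_container", listOpts).AllPages()
    if err != nil {
        panic(err)
    }

    names, err := objects.ExtractNames(allPages)
    if err != nil {
        panic(err)
    }
    fmt.Println(names) // names under logs/, plus any pseudo-directory markers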
-type DownloadOpts struct { - IfMatch string `h:"If-Match"` - IfModifiedSince time.Time `h:"If-Modified-Since"` - IfNoneMatch string `h:"If-None-Match"` - IfUnmodifiedSince time.Time `h:"If-Unmodified-Since"` - Newest bool `h:"X-Newest"` - Range string `h:"Range"` - Expires string `q:"expires"` - MultipartManifest string `q:"multipart-manifest"` - Signature string `q:"signature"` -} - -// ToObjectDownloadParams formats a DownloadOpts into a query string and map of -// headers. -func (opts DownloadOpts) ToObjectDownloadParams() (map[string]string, string, error) { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return nil, "", err - } - h, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, q.String(), err - } - return h, q.String(), nil -} - -// Download is a function that retrieves the content and metadata for an object. -// To extract just the content, pass the DownloadResult response to the -// ExtractContent function. -func Download(c *gophercloud.ServiceClient, containerName, objectName string, opts DownloadOptsBuilder) (r DownloadResult) { - url := downloadURL(c, url.QueryEscape(containerName), url.QueryEscape(objectName)) - h := make(map[string]string) - if opts != nil { - headers, query, err := opts.ToObjectDownloadParams() - if err != nil { - r.Err = err - return - } - for k, v := range headers { - h[k] = v - } - url += query - } - - resp, err := c.Get(url, nil, &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{200, 206, 304}, - KeepResponseBody: true, - }) - r.Body, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// CreateOptsBuilder allows extensions to add additional parameters to the -// Create request. -type CreateOptsBuilder interface { - ToObjectCreateParams() (io.Reader, map[string]string, string, error) -} - -// CreateOpts is a structure that holds parameters for creating an object. -type CreateOpts struct { - Content io.Reader - Metadata map[string]string - NoETag bool - CacheControl string `h:"Cache-Control"` - ContentDisposition string `h:"Content-Disposition"` - ContentEncoding string `h:"Content-Encoding"` - ContentLength int64 `h:"Content-Length"` - ContentType string `h:"Content-Type"` - CopyFrom string `h:"X-Copy-From"` - DeleteAfter int64 `h:"X-Delete-After"` - DeleteAt int64 `h:"X-Delete-At"` - DetectContentType string `h:"X-Detect-Content-Type"` - ETag string `h:"ETag"` - IfNoneMatch string `h:"If-None-Match"` - ObjectManifest string `h:"X-Object-Manifest"` - TransferEncoding string `h:"Transfer-Encoding"` - Expires string `q:"expires"` - MultipartManifest string `q:"multipart-manifest"` - Signature string `q:"signature"` -} - -// ToObjectCreateParams formats a CreateOpts into a query string and map of -// headers. -func (opts CreateOpts) ToObjectCreateParams() (io.Reader, map[string]string, string, error) { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return nil, nil, "", err - } - h, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, nil, "", err - } - - for k, v := range opts.Metadata { - h["X-Object-Meta-"+k] = v - } - - if opts.NoETag { - delete(h, "etag") - return opts.Content, h, q.String(), nil - } - - if h["ETag"] != "" { - return opts.Content, h, q.String(), nil - } - - // When we're dealing with big files an io.ReadSeeker allows us to efficiently calculate - // the md5 sum. An io.Reader is only readable once which means we have to copy the entire - // file content into memory first. 
- readSeeker, isReadSeeker := opts.Content.(io.ReadSeeker) - if !isReadSeeker { - data, err := ioutil.ReadAll(opts.Content) - if err != nil { - return nil, nil, "", err - } - readSeeker = bytes.NewReader(data) - } - - hash := md5.New() - // io.Copy into md5 is very efficient as it's done in small chunks. - if _, err := io.Copy(hash, readSeeker); err != nil { - return nil, nil, "", err - } - readSeeker.Seek(0, io.SeekStart) - - h["ETag"] = fmt.Sprintf("%x", hash.Sum(nil)) - - return readSeeker, h, q.String(), nil -} - -// Create is a function that creates a new object or replaces an existing -// object. If the returned response's ETag header fails to match the local -// checksum, the failed request will automatically be retried up to a maximum -// of 3 times. -func Create(c *gophercloud.ServiceClient, containerName, objectName string, opts CreateOptsBuilder) (r CreateResult) { - url := createURL(c, url.QueryEscape(containerName), url.QueryEscape(objectName)) - h := make(map[string]string) - var b io.Reader - if opts != nil { - tmpB, headers, query, err := opts.ToObjectCreateParams() - if err != nil { - r.Err = err - return - } - for k, v := range headers { - h[k] = v - } - url += query - b = tmpB - } - - resp, err := c.Put(url, b, nil, &gophercloud.RequestOpts{ - MoreHeaders: h, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// CopyOptsBuilder allows extensions to add additional parameters to the -// Copy request. -type CopyOptsBuilder interface { - ToObjectCopyMap() (map[string]string, error) -} - -// CopyOpts is a structure that holds parameters for copying one object to -// another. -type CopyOpts struct { - Metadata map[string]string - ContentDisposition string `h:"Content-Disposition"` - ContentEncoding string `h:"Content-Encoding"` - ContentType string `h:"Content-Type"` - Destination string `h:"Destination" required:"true"` -} - -// ToObjectCopyMap formats a CopyOpts into a map of headers. -func (opts CopyOpts) ToObjectCopyMap() (map[string]string, error) { - h, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, err - } - for k, v := range opts.Metadata { - h["X-Object-Meta-"+k] = v - } - return h, nil -} - -// Copy is a function that copies one object to another. -func Copy(c *gophercloud.ServiceClient, containerName, objectName string, opts CopyOptsBuilder) (r CopyResult) { - h := make(map[string]string) - headers, err := opts.ToObjectCopyMap() - if err != nil { - r.Err = err - return - } - - for k, v := range headers { - h[k] = v - } - - url := copyURL(c, url.QueryEscape(containerName), url.QueryEscape(objectName)) - resp, err := c.Request("COPY", url, &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{201}, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// DeleteOptsBuilder allows extensions to add additional parameters to the -// Delete request. -type DeleteOptsBuilder interface { - ToObjectDeleteQuery() (string, error) -} - -// DeleteOpts is a structure that holds parameters for deleting an object. -type DeleteOpts struct { - MultipartManifest string `q:"multipart-manifest"` -} - -// ToObjectDeleteQuery formats a DeleteOpts into a query string. -func (opts DeleteOpts) ToObjectDeleteQuery() (string, error) { - q, err := gophercloud.BuildQueryString(opts) - return q.String(), err -} - -// Delete is a function that deletes an object. 
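As the comment above notes, Create buffers a plain io.Reader fully into memory to compute the client-side MD5 ETag. For large streaming uploads that checksum can be skipped; a hedged sketch, where someReader stands in for any io.Reader:

    createOpts := objects.CreateOpts{
        ContentType: "application/octet-stream",
        Content:     someReader, // hypothetical io.Reader
        NoETag:      true,       // skip the client-side MD5; the server still stores its own ETag
    }
    if _, err := objects.Create(objectStorageClient, "my_container", "big_object", createOpts).Extract(); err != nil {
        panic(err)
    }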
-func Delete(c *gophercloud.ServiceClient, containerName, objectName string, opts DeleteOptsBuilder) (r DeleteResult) { - url := deleteURL(c, url.QueryEscape(containerName), url.QueryEscape(objectName)) - if opts != nil { - query, err := opts.ToObjectDeleteQuery() - if err != nil { - r.Err = err - return - } - url += query - } - resp, err := c.Delete(url, nil) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// GetOptsBuilder allows extensions to add additional parameters to the -// Get request. -type GetOptsBuilder interface { - ToObjectGetParams() (map[string]string, string, error) -} - -// GetOpts is a structure that holds parameters for getting an object's -// metadata. -type GetOpts struct { - Newest bool `h:"X-Newest"` - Expires string `q:"expires"` - Signature string `q:"signature"` -} - -// ToObjectGetParams formats a GetOpts into a query string and a map of headers. -func (opts GetOpts) ToObjectGetParams() (map[string]string, string, error) { - q, err := gophercloud.BuildQueryString(opts) - if err != nil { - return nil, "", err - } - h, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, q.String(), err - } - return h, q.String(), nil -} - -// Get is a function that retrieves the metadata of an object. To extract just -// the custom metadata, pass the GetResult response to the ExtractMetadata -// function. -func Get(c *gophercloud.ServiceClient, containerName, objectName string, opts GetOptsBuilder) (r GetResult) { - url := getURL(c, url.QueryEscape(containerName), url.QueryEscape(objectName)) - h := make(map[string]string) - if opts != nil { - headers, query, err := opts.ToObjectGetParams() - if err != nil { - r.Err = err - return - } - for k, v := range headers { - h[k] = v - } - url += query - } - - resp, err := c.Head(url, &gophercloud.RequestOpts{ - MoreHeaders: h, - OkCodes: []int{200, 204}, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// UpdateOptsBuilder allows extensions to add additional parameters to the -// Update request. -type UpdateOptsBuilder interface { - ToObjectUpdateMap() (map[string]string, error) -} - -// UpdateOpts is a structure that holds parameters for updating, creating, or -// deleting an object's metadata. -type UpdateOpts struct { - Metadata map[string]string - ContentDisposition string `h:"Content-Disposition"` - ContentEncoding string `h:"Content-Encoding"` - ContentType string `h:"Content-Type"` - DeleteAfter int64 `h:"X-Delete-After"` - DeleteAt int64 `h:"X-Delete-At"` - DetectContentType bool `h:"X-Detect-Content-Type"` -} - -// ToObjectUpdateMap formats a UpdateOpts into a map of headers. -func (opts UpdateOpts) ToObjectUpdateMap() (map[string]string, error) { - h, err := gophercloud.BuildHeaders(opts) - if err != nil { - return nil, err - } - for k, v := range opts.Metadata { - h["X-Object-Meta-"+k] = v - } - return h, nil -} - -// Update is a function that creates, updates, or deletes an object's metadata. 
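A sketch of the object-metadata round trip with the Update and Get calls above (client and names are illustrative):

    updateOpts := objects.UpdateOpts{
        Metadata: map[string]string{"origin": "batch-import"},
    }
    if _, err := objects.Update(objectStorageClient, "my_container", "my_object", updateOpts).Extract(); err != nil {
        panic(err)
    }

    metadata, err := objects.Get(objectStorageClient, "my_container", "my_object", nil).ExtractMetadata()
    if err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", metadata) // keys come back with the X-Object-Meta- prefix trimmed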
-func Update(c *gophercloud.ServiceClient, containerName, objectName string, opts UpdateOptsBuilder) (r UpdateResult) { - h := make(map[string]string) - if opts != nil { - headers, err := opts.ToObjectUpdateMap() - if err != nil { - r.Err = err - return - } - - for k, v := range headers { - h[k] = v - } - } - url := updateURL(c, url.QueryEscape(containerName), url.QueryEscape(objectName)) - resp, err := c.Post(url, nil, nil, &gophercloud.RequestOpts{ - MoreHeaders: h, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} - -// HTTPMethod represents an HTTP method string (e.g. "GET"). -type HTTPMethod string - -var ( - // GET represents an HTTP "GET" method. - GET HTTPMethod = "GET" - - // POST represents an HTTP "POST" method. - POST HTTPMethod = "POST" -) - -// CreateTempURLOpts are options for creating a temporary URL for an object. -type CreateTempURLOpts struct { - // (REQUIRED) Method is the HTTP method to allow for users of the temp URL. - // Valid values are "GET" and "POST". - Method HTTPMethod - - // (REQUIRED) TTL is the number of seconds the temp URL should be active. - TTL int - - // (Optional) Split is the string on which to split the object URL. Since only - // the object path is used in the hash, the object URL needs to be parsed. If - // empty, the default OpenStack URL split point will be used ("/v1/"). - Split string - - // Timestamp is a timestamp to calculate Temp URL signature. Optional. - Timestamp time.Time -} - -// CreateTempURL is a function for creating a temporary URL for an object. It -// allows users to have "GET" or "POST" access to a particular tenant's object -// for a limited amount of time. -func CreateTempURL(c *gophercloud.ServiceClient, containerName, objectName string, opts CreateTempURLOpts) (string, error) { - if opts.Split == "" { - opts.Split = "/v1/" - } - - // Initialize time if it was not passed as opts - var date time.Time - if opts.Timestamp.IsZero() { - date = time.Now().UTC() - } else { - date = opts.Timestamp - } - - duration := time.Duration(opts.TTL) * time.Second - expiry := date.Add(duration).Unix() - getHeader, err := containers.Get(c, url.QueryEscape(containerName), nil).Extract() - if err != nil { - return "", err - } - tempURLKey := getHeader.TempURLKey - if tempURLKey == "" { - // fallback to an account TempURL key - getHeader, err := accounts.Get(c, nil).Extract() - if err != nil { - return "", err - } - tempURLKey = getHeader.TempURLKey - } - secretKey := []byte(tempURLKey) - url := getURL(c, containerName, objectName) - splitPath := strings.Split(url, opts.Split) - baseURL, objectPath := splitPath[0], splitPath[1] - objectPath = opts.Split + objectPath - body := fmt.Sprintf("%s\n%d\n%s", opts.Method, expiry, objectPath) - hash := hmac.New(sha1.New, secretKey) - hash.Write([]byte(body)) - hexsum := fmt.Sprintf("%x", hash.Sum(nil)) - return fmt.Sprintf("%s%s?temp_url_sig=%s&temp_url_expires=%d", baseURL, objectPath, hexsum, expiry), nil -} - -// BulkDelete is a function that bulk deletes objects. 
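The signature CreateTempURL produces above is plain HMAC-SHA1 over "METHOD\nEXPIRY\nPATH". A self-contained, runnable sketch of the same computation using only the standard library; the key, host, and object path are illustrative:

    package main

    import (
        "crypto/hmac"
        "crypto/sha1"
        "fmt"
        "time"
    )

    func main() {
        secretKey := []byte("my-temp-url-key") // container or account Temp-URL key
        objectPath := "/v1/AUTH_tenant/my_container/my_object"
        expiry := time.Now().UTC().Add(5 * time.Minute).Unix()

        // Sign "METHOD\nEXPIRY\nPATH", exactly as CreateTempURL does above.
        body := fmt.Sprintf("GET\n%d\n%s", expiry, objectPath)
        mac := hmac.New(sha1.New, secretKey)
        mac.Write([]byte(body))
        sig := fmt.Sprintf("%x", mac.Sum(nil))

        fmt.Printf("https://swift.example.com%s?temp_url_sig=%s&temp_url_expires=%d\n",
            objectPath, sig, expiry)
    }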
-func BulkDelete(c *gophercloud.ServiceClient, container string, objects []string) (r BulkDeleteResult) { - // urlencode object names to be on the safe side - // https://github.com/openstack/swift/blob/stable/train/swift/common/middleware/bulk.py#L160 - // https://github.com/openstack/swift/blob/stable/train/swift/common/swob.py#L302 - encodedObjects := make([]string, len(objects)) - for i, v := range objects { - encodedObjects[i] = strings.Join([]string{ - url.QueryEscape(container), - url.QueryEscape(v)}, - "/") - } - b := strings.NewReader(strings.Join(encodedObjects, "\n") + "\n") - resp, err := c.Post(bulkDeleteURL(c), b, &r.Body, &gophercloud.RequestOpts{ - MoreHeaders: map[string]string{ - "Accept": "application/json", - "Content-Type": "text/plain", - }, - OkCodes: []int{200}, - }) - _, r.Header, r.Err = gophercloud.ParseResponse(resp, err) - return -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go deleted file mode 100644 index 75367d8349b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go +++ /dev/null @@ -1,534 +0,0 @@ -package objects - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/url" - "strings" - "time" - - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/pagination" -) - -// Object is a structure that holds information related to a storage object. -type Object struct { - // Bytes is the total number of bytes that comprise the object. - Bytes int64 `json:"bytes"` - - // ContentType is the content type of the object. - ContentType string `json:"content_type"` - - // Hash represents the MD5 checksum value of the object's content. - Hash string `json:"hash"` - - // LastModified is the time the object was last modified. - LastModified time.Time `json:"-"` - - // Name is the unique name for the object. - Name string `json:"name"` - - // Subdir denotes if the result contains a subdir. - Subdir string `json:"subdir"` -} - -func (r *Object) UnmarshalJSON(b []byte) error { - type tmp Object - var s *struct { - tmp - LastModified string `json:"last_modified"` - } - - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = Object(s.tmp) - - if s.LastModified != "" { - t, err := time.Parse(gophercloud.RFC3339MilliNoZ, s.LastModified) - if err != nil { - t, err = time.Parse(gophercloud.RFC3339Milli, s.LastModified) - if err != nil { - return err - } - } - r.LastModified = t - } - - return nil -} - -// ObjectPage is a single page of objects that is returned from a call to the -// List function. -type ObjectPage struct { - pagination.MarkerPageBase -} - -// IsEmpty returns true if a ListResult contains no object names. -func (r ObjectPage) IsEmpty() (bool, error) { - names, err := ExtractNames(r) - return len(names) == 0, err -} - -// LastMarker returns the last object name in a ListResult. -func (r ObjectPage) LastMarker() (string, error) { - return extractLastMarker(r) -} - -// ExtractInfo is a function that takes a page of objects and returns their -// full information. -func ExtractInfo(r pagination.Page) ([]Object, error) { - var s []Object - err := (r.(ObjectPage)).ExtractInto(&s) - return s, err -} - -// ExtractNames is a function that takes a page of objects and returns only -// their names. 
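A usage sketch for the object BulkDelete above; the response distinguishes deleted, missing, and failed names (client and object names illustrative):

    res, err := objects.BulkDelete(objectStorageClient, "my_container", []string{"a.txt", "b.txt"}).Extract()
    if err != nil {
        panic(err)
    }
    if len(res.Errors) > 0 {
        fmt.Printf("partial failure: %v\n", res.Errors)
    }
    fmt.Printf("deleted %d, not found %d\n", res.NumberDeleted, res.NumberNotFound)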
-func ExtractNames(r pagination.Page) ([]string, error) {
-    casted := r.(ObjectPage)
-    ct := casted.Header.Get("Content-Type")
-    switch {
-    case strings.HasPrefix(ct, "application/json"):
-        parsed, err := ExtractInfo(r)
-        if err != nil {
-            return nil, err
-        }
-
-        names := make([]string, 0, len(parsed))
-        for _, object := range parsed {
-            if object.Subdir != "" {
-                names = append(names, object.Subdir)
-            } else {
-                names = append(names, object.Name)
-            }
-        }
-
-        return names, nil
-    case strings.HasPrefix(ct, "text/plain"):
-        names := make([]string, 0, 50)
-
-        body := string(r.(ObjectPage).Body.([]uint8))
-        for _, name := range strings.Split(body, "\n") {
-            if len(name) > 0 {
-                names = append(names, name)
-            }
-        }
-
-        return names, nil
-    case strings.HasPrefix(ct, "text/html"):
-        return []string{}, nil
-    default:
-        return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct)
-    }
-}
-
-// DownloadHeader represents the headers returned in the response from a
-// Download request.
-type DownloadHeader struct {
-    AcceptRanges       string    `json:"Accept-Ranges"`
-    ContentDisposition string    `json:"Content-Disposition"`
-    ContentEncoding    string    `json:"Content-Encoding"`
-    ContentLength      int64     `json:"Content-Length,string"`
-    ContentType        string    `json:"Content-Type"`
-    Date               time.Time `json:"-"`
-    DeleteAt           time.Time `json:"-"`
-    ETag               string    `json:"Etag"`
-    LastModified       time.Time `json:"-"`
-    ObjectManifest     string    `json:"X-Object-Manifest"`
-    StaticLargeObject  bool      `json:"-"`
-    TransID            string    `json:"X-Trans-Id"`
-}
-
-func (r *DownloadHeader) UnmarshalJSON(b []byte) error {
-    type tmp DownloadHeader
-    var s struct {
-        tmp
-        Date              gophercloud.JSONRFC1123 `json:"Date"`
-        DeleteAt          gophercloud.JSONUnix    `json:"X-Delete-At"`
-        LastModified      gophercloud.JSONRFC1123 `json:"Last-Modified"`
-        StaticLargeObject interface{}             `json:"X-Static-Large-Object"`
-    }
-    err := json.Unmarshal(b, &s)
-    if err != nil {
-        return err
-    }
-
-    *r = DownloadHeader(s.tmp)
-
-    switch t := s.StaticLargeObject.(type) {
-    case string:
-        if t == "True" || t == "true" {
-            r.StaticLargeObject = true
-        }
-    case bool:
-        r.StaticLargeObject = t
-    }
-
-    r.Date = time.Time(s.Date)
-    r.DeleteAt = time.Time(s.DeleteAt)
-    r.LastModified = time.Time(s.LastModified)
-
-    return nil
-}
-
-// DownloadResult represents the result of a call to the Download function.
-type DownloadResult struct {
-    gophercloud.HeaderResult
-    Body io.ReadCloser
-}
-
-// Extract will return a struct of headers returned from a call to Download.
-func (r DownloadResult) Extract() (*DownloadHeader, error) {
-    var s DownloadHeader
-    err := r.ExtractInto(&s)
-    return &s, err
-}
-
-// ExtractContent is a function that takes a DownloadResult's io.Reader body
-// and reads all available data into a slice of bytes. Please be aware that,
-// due to the nature of io.Reader, the body is forward-only - it can only be
-// read once and not rewound. You can recreate a reader from the output of
-// this function by using bytes.NewReader(downloadBytes).
-func (r *DownloadResult) ExtractContent() ([]byte, error) {
-    if r.Err != nil {
-        return nil, r.Err
-    }
-    defer r.Body.Close()
-    body, err := ioutil.ReadAll(r.Body)
-    if err != nil {
-        return nil, err
-    }
-    return body, nil
-}
-
-// GetHeader represents the headers returned in the response from a Get request.
-type GetHeader struct { - ContentDisposition string `json:"Content-Disposition"` - ContentEncoding string `json:"Content-Encoding"` - ContentLength int64 `json:"Content-Length,string"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - DeleteAt time.Time `json:"-"` - ETag string `json:"Etag"` - LastModified time.Time `json:"-"` - ObjectManifest string `json:"X-Object-Manifest"` - StaticLargeObject bool `json:"-"` - TransID string `json:"X-Trans-Id"` -} - -func (r *GetHeader) UnmarshalJSON(b []byte) error { - type tmp GetHeader - var s struct { - tmp - Date gophercloud.JSONRFC1123 `json:"Date"` - DeleteAt gophercloud.JSONUnix `json:"X-Delete-At"` - LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` - StaticLargeObject interface{} `json:"X-Static-Large-Object"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = GetHeader(s.tmp) - - switch t := s.StaticLargeObject.(type) { - case string: - if t == "True" || t == "true" { - r.StaticLargeObject = true - } - case bool: - r.StaticLargeObject = t - } - - r.Date = time.Time(s.Date) - r.DeleteAt = time.Time(s.DeleteAt) - r.LastModified = time.Time(s.LastModified) - - return nil -} - -// GetResult is a *http.Response that is returned from a call to the Get -// function. -type GetResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Get. -func (r GetResult) Extract() (*GetHeader, error) { - var s GetHeader - err := r.ExtractInto(&s) - return &s, err -} - -// ExtractMetadata is a function that takes a GetResult (of type *http.Response) -// and returns the custom metadata associated with the object. -func (r GetResult) ExtractMetadata() (map[string]string, error) { - if r.Err != nil { - return nil, r.Err - } - metadata := make(map[string]string) - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Object-Meta-") { - key := strings.TrimPrefix(k, "X-Object-Meta-") - metadata[key] = v[0] - } - } - return metadata, nil -} - -// CreateHeader represents the headers returned in the response from a -// Create request. -type CreateHeader struct { - ContentLength int64 `json:"Content-Length,string"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - ETag string `json:"Etag"` - LastModified time.Time `json:"-"` - TransID string `json:"X-Trans-Id"` -} - -func (r *CreateHeader) UnmarshalJSON(b []byte) error { - type tmp CreateHeader - var s struct { - tmp - Date gophercloud.JSONRFC1123 `json:"Date"` - LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = CreateHeader(s.tmp) - - r.Date = time.Time(s.Date) - r.LastModified = time.Time(s.LastModified) - - return nil -} - -// CreateResult represents the result of a create operation. -type CreateResult struct { - checksum string - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Create. -func (r CreateResult) Extract() (*CreateHeader, error) { - //if r.Header.Get("ETag") != fmt.Sprintf("%x", localChecksum) { - // return nil, ErrWrongChecksum{} - //} - var s CreateHeader - err := r.ExtractInto(&s) - return &s, err -} - -// UpdateHeader represents the headers returned in the response from a -// Update request. 
-type UpdateHeader struct { - ContentLength int64 `json:"Content-Length,string"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - TransID string `json:"X-Trans-Id"` -} - -func (r *UpdateHeader) UnmarshalJSON(b []byte) error { - type tmp UpdateHeader - var s struct { - tmp - Date gophercloud.JSONRFC1123 `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = UpdateHeader(s.tmp) - - r.Date = time.Time(s.Date) - - return nil -} - -// UpdateResult represents the result of an update operation. -type UpdateResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Update. -func (r UpdateResult) Extract() (*UpdateHeader, error) { - var s UpdateHeader - err := r.ExtractInto(&s) - return &s, err -} - -// DeleteHeader represents the headers returned in the response from a -// Delete request. -type DeleteHeader struct { - ContentLength int64 `json:"Content-Length,string"` - ContentType string `json:"Content-Type"` - Date time.Time `json:"-"` - TransID string `json:"X-Trans-Id"` -} - -func (r *DeleteHeader) UnmarshalJSON(b []byte) error { - type tmp DeleteHeader - var s struct { - tmp - Date gophercloud.JSONRFC1123 `json:"Date"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = DeleteHeader(s.tmp) - - r.Date = time.Time(s.Date) - - return nil -} - -// DeleteResult represents the result of a delete operation. -type DeleteResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Delete. -func (r DeleteResult) Extract() (*DeleteHeader, error) { - var s DeleteHeader - err := r.ExtractInto(&s) - return &s, err -} - -// CopyHeader represents the headers returned in the response from a -// Copy request. -type CopyHeader struct { - ContentLength int64 `json:"Content-Length,string"` - ContentType string `json:"Content-Type"` - CopiedFrom string `json:"X-Copied-From"` - CopiedFromLastModified time.Time `json:"-"` - Date time.Time `json:"-"` - ETag string `json:"Etag"` - LastModified time.Time `json:"-"` - TransID string `json:"X-Trans-Id"` -} - -func (r *CopyHeader) UnmarshalJSON(b []byte) error { - type tmp CopyHeader - var s struct { - tmp - CopiedFromLastModified gophercloud.JSONRFC1123 `json:"X-Copied-From-Last-Modified"` - Date gophercloud.JSONRFC1123 `json:"Date"` - LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` - } - err := json.Unmarshal(b, &s) - if err != nil { - return err - } - - *r = CopyHeader(s.tmp) - - r.Date = time.Time(s.Date) - r.CopiedFromLastModified = time.Time(s.CopiedFromLastModified) - r.LastModified = time.Time(s.LastModified) - - return nil -} - -// CopyResult represents the result of a copy operation. -type CopyResult struct { - gophercloud.HeaderResult -} - -// Extract will return a struct of headers returned from a call to Copy. -func (r CopyResult) Extract() (*CopyHeader, error) { - var s CopyHeader - err := r.ExtractInto(&s) - return &s, err -} - -type BulkDeleteResponse struct { - ResponseStatus string `json:"Response Status"` - ResponseBody string `json:"Response Body"` - Errors [][]string `json:"Errors"` - NumberDeleted int `json:"Number Deleted"` - NumberNotFound int `json:"Number Not Found"` -} - -// BulkDeleteResult represents the result of a bulk delete operation. To extract -// the response object from the HTTP response, call its Extract method. 
-type BulkDeleteResult struct { - gophercloud.Result -} - -// Extract will return a BulkDeleteResponse struct returned from a BulkDelete -// call. -func (r BulkDeleteResult) Extract() (*BulkDeleteResponse, error) { - var s BulkDeleteResponse - err := r.ExtractInto(&s) - return &s, err -} - -// extractLastMarker is a function that takes a page of objects and returns the -// marker for the page. This can either be a subdir or the last object's name. -func extractLastMarker(r pagination.Page) (string, error) { - casted := r.(ObjectPage) - - // If a delimiter was requested, check if a subdir exists. - queryParams, err := url.ParseQuery(casted.URL.RawQuery) - if err != nil { - return "", err - } - - var delimeter bool - if v, ok := queryParams["delimiter"]; ok && len(v) > 0 { - delimeter = true - } - - ct := casted.Header.Get("Content-Type") - switch { - case strings.HasPrefix(ct, "application/json"): - parsed, err := ExtractInfo(r) - if err != nil { - return "", err - } - - var lastObject Object - if len(parsed) > 0 { - lastObject = parsed[len(parsed)-1] - } - - if !delimeter { - return lastObject.Name, nil - } - - if lastObject.Name != "" { - return lastObject.Name, nil - } - - return lastObject.Subdir, nil - case strings.HasPrefix(ct, "text/plain"): - names := make([]string, 0, 50) - - body := string(r.(ObjectPage).Body.([]uint8)) - for _, name := range strings.Split(body, "\n") { - if len(name) > 0 { - names = append(names, name) - } - } - - return names[len(names)-1], err - case strings.HasPrefix(ct, "text/html"): - return "", nil - default: - return "", fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct) - } -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go deleted file mode 100644 index 918ec94b9bb..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go +++ /dev/null @@ -1,37 +0,0 @@ -package objects - -import ( - "github.com/gophercloud/gophercloud" -) - -func listURL(c *gophercloud.ServiceClient, container string) string { - return c.ServiceURL(container) -} - -func copyURL(c *gophercloud.ServiceClient, container, object string) string { - return c.ServiceURL(container, object) -} - -func createURL(c *gophercloud.ServiceClient, container, object string) string { - return copyURL(c, container, object) -} - -func getURL(c *gophercloud.ServiceClient, container, object string) string { - return copyURL(c, container, object) -} - -func deleteURL(c *gophercloud.ServiceClient, container, object string) string { - return copyURL(c, container, object) -} - -func downloadURL(c *gophercloud.ServiceClient, container, object string) string { - return copyURL(c, container, object) -} - -func updateURL(c *gophercloud.ServiceClient, container, object string) string { - return copyURL(c, container, object) -} - -func bulkDeleteURL(c *gophercloud.ServiceClient) string { - return c.Endpoint + "?bulk-delete=true" -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go deleted file mode 100644 index 40080f7af20..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go +++ /dev/null @@ -1,28 +0,0 @@ -package utils - -import ( - "net/url" - "regexp" - "strings" -) - -// BaseEndpoint will return a URL without the /vX.Y -// portion of the URL. 
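A quick illustration of what BaseEndpoint strips; the hostname is illustrative and the version suffix matches the regexp in the function body below:

    base, err := utils.BaseEndpoint("https://identity.example.com:5000/v3/")
    if err != nil {
        panic(err)
    }
    fmt.Println(base) // https://identity.example.com:5000/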
-func BaseEndpoint(endpoint string) (string, error) { - u, err := url.Parse(endpoint) - if err != nil { - return "", err - } - - u.RawQuery, u.Fragment = "", "" - - path := u.Path - versionRe := regexp.MustCompile("v[0-9.]+/?") - - if version := versionRe.FindString(path); version != "" { - versionIndex := strings.Index(path, version) - u.Path = path[:versionIndex] - } - - return u.String(), nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go deleted file mode 100644 index 27da19f91a8..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go +++ /dev/null @@ -1,111 +0,0 @@ -package utils - -import ( - "fmt" - "strings" - - "github.com/gophercloud/gophercloud" -) - -// Version is a supported API version, corresponding to a vN package within the appropriate service. -type Version struct { - ID string - Suffix string - Priority int -} - -var goodStatus = map[string]bool{ - "current": true, - "supported": true, - "stable": true, -} - -// ChooseVersion queries the base endpoint of an API to choose the most recent non-experimental alternative from a service's -// published versions. -// It returns the highest-Priority Version among the alternatives that are provided, as well as its corresponding endpoint. -func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (*Version, string, error) { - type linkResp struct { - Href string `json:"href"` - Rel string `json:"rel"` - } - - type valueResp struct { - ID string `json:"id"` - Status string `json:"status"` - Links []linkResp `json:"links"` - } - - type versionsResp struct { - Values []valueResp `json:"values"` - } - - type response struct { - Versions versionsResp `json:"versions"` - } - - normalize := func(endpoint string) string { - if !strings.HasSuffix(endpoint, "/") { - return endpoint + "/" - } - return endpoint - } - identityEndpoint := normalize(client.IdentityEndpoint) - - // If a full endpoint is specified, check version suffixes for a match first. - for _, v := range recognized { - if strings.HasSuffix(identityEndpoint, v.Suffix) { - return v, identityEndpoint, nil - } - } - - var resp response - _, err := client.Request("GET", client.IdentityBase, &gophercloud.RequestOpts{ - JSONResponse: &resp, - OkCodes: []int{200, 300}, - }) - - if err != nil { - return nil, "", err - } - - var highest *Version - var endpoint string - - for _, value := range resp.Versions.Values { - href := "" - for _, link := range value.Links { - if link.Rel == "self" { - href = normalize(link.Href) - } - } - - for _, version := range recognized { - if strings.Contains(value.ID, version.ID) { - // Prefer a version that exactly matches the provided endpoint. - if href == identityEndpoint { - if href == "" { - return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase) - } - return version, href, nil - } - - // Otherwise, find the highest-priority version with a whitelisted status. 
- if goodStatus[strings.ToLower(value.Status)] { - if highest == nil || version.Priority > highest.Priority { - highest = version - endpoint = href - } - } - } - } - } - - if highest == nil { - return nil, "", fmt.Errorf("No supported version available from endpoint %s", client.IdentityBase) - } - if endpoint == "" { - return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", highest.ID, client.IdentityBase) - } - - return highest, endpoint, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/http.go b/vendor/github.com/gophercloud/gophercloud/pagination/http.go deleted file mode 100644 index df3503159ae..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/pagination/http.go +++ /dev/null @@ -1,61 +0,0 @@ -package pagination - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - "strings" - - "github.com/gophercloud/gophercloud" -) - -// PageResult stores the HTTP response that returned the current page of results. -type PageResult struct { - gophercloud.Result - url.URL -} - -// PageResultFrom parses an HTTP response as JSON and returns a PageResult containing the -// results, interpreting it as JSON if the content type indicates. -func PageResultFrom(resp *http.Response) (PageResult, error) { - var parsedBody interface{} - - defer resp.Body.Close() - rawBody, err := ioutil.ReadAll(resp.Body) - if err != nil { - return PageResult{}, err - } - - if strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") { - err = json.Unmarshal(rawBody, &parsedBody) - if err != nil { - return PageResult{}, err - } - } else { - parsedBody = rawBody - } - - return PageResultFromParsed(resp, parsedBody), err -} - -// PageResultFromParsed constructs a PageResult from an HTTP response that has already had its -// body parsed as JSON (and closed). -func PageResultFromParsed(resp *http.Response, body interface{}) PageResult { - return PageResult{ - Result: gophercloud.Result{ - Body: body, - Header: resp.Header, - }, - URL: *resp.Request.URL, - } -} - -// Request performs an HTTP request and extracts the http.Response from the result. -func Request(client *gophercloud.ServiceClient, headers map[string]string, url string) (*http.Response, error) { - return client.Get(url, nil, &gophercloud.RequestOpts{ - MoreHeaders: headers, - OkCodes: []int{200, 204, 300}, - KeepResponseBody: true, - }) -} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/linked.go b/vendor/github.com/gophercloud/gophercloud/pagination/linked.go deleted file mode 100644 index 3656fb7f8f4..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/pagination/linked.go +++ /dev/null @@ -1,92 +0,0 @@ -package pagination - -import ( - "fmt" - "reflect" - - "github.com/gophercloud/gophercloud" -) - -// LinkedPageBase may be embedded to implement a page that provides navigational "Next" and "Previous" links within its result. -type LinkedPageBase struct { - PageResult - - // LinkPath lists the keys that should be traversed within a response to arrive at the "next" pointer. - // If any link along the path is missing, an empty URL will be returned. - // If any link results in an unexpected value type, an error will be returned. - // When left as "nil", []string{"links", "next"} will be used as a default. - LinkPath []string -} - -// NextPageURL extracts the pagination structure from a JSON response and returns the "next" link, if one is present. -// It assumes that the links are available in a "links" element of the top-level response object. 
-// If this is not the case, override NextPageURL on your result type.
-func (current LinkedPageBase) NextPageURL() (string, error) {
-    var path []string
-    var key string
-
-    if current.LinkPath == nil {
-        path = []string{"links", "next"}
-    } else {
-        path = current.LinkPath
-    }
-
-    submap, ok := current.Body.(map[string]interface{})
-    if !ok {
-        err := gophercloud.ErrUnexpectedType{}
-        err.Expected = "map[string]interface{}"
-        err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body))
-        return "", err
-    }
-
-    for {
-        key, path = path[0], path[1:len(path)]
-
-        value, ok := submap[key]
-        if !ok {
-            return "", nil
-        }
-
-        if len(path) > 0 {
-            submap, ok = value.(map[string]interface{})
-            if !ok {
-                err := gophercloud.ErrUnexpectedType{}
-                err.Expected = "map[string]interface{}"
-                err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value))
-                return "", err
-            }
-        } else {
-            if value == nil {
-                // Actual null element.
-                return "", nil
-            }
-
-            url, ok := value.(string)
-            if !ok {
-                err := gophercloud.ErrUnexpectedType{}
-                err.Expected = "string"
-                err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value))
-                return "", err
-            }
-
-            return url, nil
-        }
-    }
-}
-
-// IsEmpty satisfies the IsEmpty method of the Page interface
-func (current LinkedPageBase) IsEmpty() (bool, error) {
-    if b, ok := current.Body.([]interface{}); ok {
-        return len(b) == 0, nil
-    }
-    err := gophercloud.ErrUnexpectedType{}
-    err.Expected = "[]interface{}"
-    err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body))
-    return true, err
-}
-
-// GetBody returns the linked page's body. This method is needed to satisfy the
-// Page interface.
-func (current LinkedPageBase) GetBody() interface{} {
-    return current.Body
-}
diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/marker.go b/vendor/github.com/gophercloud/gophercloud/pagination/marker.go
deleted file mode 100644
index 52e53bae850..00000000000
--- a/vendor/github.com/gophercloud/gophercloud/pagination/marker.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package pagination
-
-import (
-    "fmt"
-    "reflect"
-
-    "github.com/gophercloud/gophercloud"
-)
-
-// MarkerPage is a stricter Page interface that describes additional functionality required for use with NewMarkerPager.
-// For convenience, embed the MarkerPageBase struct.
-type MarkerPage interface {
-    Page
-
-    // LastMarker returns the last "marker" value on this page.
-    LastMarker() (string, error)
-}
-
-// MarkerPageBase is a page in a collection that's paginated by "limit" and "marker" query parameters.
-type MarkerPageBase struct {
-    PageResult
-
-    // Owner is a reference to the embedding struct.
-    Owner MarkerPage
-}
-
-// NextPageURL generates the URL for the page of results after this one.
-func (current MarkerPageBase) NextPageURL() (string, error) {
-    currentURL := current.URL
-
-    mark, err := current.Owner.LastMarker()
-    if err != nil {
-        return "", err
-    }
-
-    q := currentURL.Query()
-    q.Set("marker", mark)
-    currentURL.RawQuery = q.Encode()
-
-    return currentURL.String(), nil
-}
-
-// IsEmpty satisfies the IsEmpty method of the Page interface
-func (current MarkerPageBase) IsEmpty() (bool, error) {
-    if b, ok := current.Body.([]interface{}); ok {
-        return len(b) == 0, nil
-    }
-    err := gophercloud.ErrUnexpectedType{}
-    err.Expected = "[]interface{}"
-    err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body))
-    return true, err
-}
-
-// GetBody returns the marker page's body. This method is needed to satisfy the
-// Page interface.
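LinkedPageBase.NextPageURL walks the page Body as nested maps, defaulting to links.next. A minimal sketch of that traversal, constructing the page types directly with an illustrative body (in normal use the Pager builds these):

    page := pagination.LinkedPageBase{
        PageResult: pagination.PageResult{
            Result: gophercloud.Result{
                Body: map[string]interface{}{
                    "widgets": []interface{}{},
                    "links": map[string]interface{}{
                        "next": "https://api.example.com/v2/widgets?marker=abc",
                    },
                },
            },
        },
    }
    next, err := page.NextPageURL()
    if err != nil {
        panic(err)
    }
    fmt.Println(next) // https://api.example.com/v2/widgets?marker=abc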
-func (current MarkerPageBase) GetBody() interface{} { - return current.Body -} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go deleted file mode 100644 index 42c0b2dbe5b..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go +++ /dev/null @@ -1,251 +0,0 @@ -package pagination - -import ( - "errors" - "fmt" - "net/http" - "reflect" - "strings" - - "github.com/gophercloud/gophercloud" -) - -var ( - // ErrPageNotAvailable is returned from a Pager when a next or previous page is requested, but does not exist. - ErrPageNotAvailable = errors.New("The requested page does not exist.") -) - -// Page must be satisfied by the result type of any resource collection. -// It allows clients to interact with the resource uniformly, regardless of whether or not or how it's paginated. -// Generally, rather than implementing this interface directly, implementors should embed one of the concrete PageBase structs, -// instead. -// Depending on the pagination strategy of a particular resource, there may be an additional subinterface that the result type -// will need to implement. -type Page interface { - // NextPageURL generates the URL for the page of data that follows this collection. - // Return "" if no such page exists. - NextPageURL() (string, error) - - // IsEmpty returns true if this Page has no items in it. - IsEmpty() (bool, error) - - // GetBody returns the Page Body. This is used in the `AllPages` method. - GetBody() interface{} -} - -// Pager knows how to advance through a specific resource collection, one page at a time. -type Pager struct { - client *gophercloud.ServiceClient - - initialURL string - - createPage func(r PageResult) Page - - firstPage Page - - Err error - - // Headers supplies additional HTTP headers to populate on each paged request. - Headers map[string]string -} - -// NewPager constructs a manually-configured pager. -// Supply the URL for the first page, a function that requests a specific page given a URL, and a function that counts a page. -func NewPager(client *gophercloud.ServiceClient, initialURL string, createPage func(r PageResult) Page) Pager { - return Pager{ - client: client, - initialURL: initialURL, - createPage: createPage, - } -} - -// WithPageCreator returns a new Pager that substitutes a different page creation function. This is -// useful for overriding List functions in delegation. -func (p Pager) WithPageCreator(createPage func(r PageResult) Page) Pager { - return Pager{ - client: p.client, - initialURL: p.initialURL, - createPage: createPage, - } -} - -func (p Pager) fetchNextPage(url string) (Page, error) { - resp, err := Request(p.client, p.Headers, url) - if err != nil { - return nil, err - } - - remembered, err := PageResultFrom(resp) - if err != nil { - return nil, err - } - - return p.createPage(remembered), nil -} - -// EachPage iterates over each page returned by a Pager, yielding one at a time to a handler function. -// Return "false" from the handler to prematurely stop iterating. 
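Marker pagination simply re-issues the current URL with marker set to the page's last item, so the URL arithmetic in MarkerPageBase.NextPageURL reduces to this standard-library sketch (values illustrative):

    u, err := url.Parse("https://swift.example.com/v1/AUTH_t/my_container?limit=100")
    if err != nil {
        panic(err)
    }
    q := u.Query()
    q.Set("marker", "last-object-name") // what LastMarker() returned for the current page
    u.RawQuery = q.Encode()
    fmt.Println(u.String())
    // https://swift.example.com/v1/AUTH_t/my_container?limit=100&marker=last-object-name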
-func (p Pager) EachPage(handler func(Page) (bool, error)) error {
-	if p.Err != nil {
-		return p.Err
-	}
-	currentURL := p.initialURL
-	for {
-		var currentPage Page
-
-		// if first page has already been fetched, no need to fetch it again
-		if p.firstPage != nil {
-			currentPage = p.firstPage
-			p.firstPage = nil
-		} else {
-			var err error
-			currentPage, err = p.fetchNextPage(currentURL)
-			if err != nil {
-				return err
-			}
-		}
-
-		empty, err := currentPage.IsEmpty()
-		if err != nil {
-			return err
-		}
-		if empty {
-			return nil
-		}
-
-		ok, err := handler(currentPage)
-		if err != nil {
-			return err
-		}
-		if !ok {
-			return nil
-		}
-
-		currentURL, err = currentPage.NextPageURL()
-		if err != nil {
-			return err
-		}
-		if currentURL == "" {
-			return nil
-		}
-	}
-}
-
-// AllPages returns all the pages from a `List` operation in a single page,
-// allowing the user to retrieve all the pages at once.
-func (p Pager) AllPages() (Page, error) {
-	// pagesSlice holds all the pages until they get converted into a Page Body.
-	var pagesSlice []interface{}
-	// body will contain the final concatenated Page body.
-	var body reflect.Value
-
-	// Grab a first page to ascertain the page body type.
-	firstPage, err := p.fetchNextPage(p.initialURL)
-	if err != nil {
-		return nil, err
-	}
-	// Store the page type so we can use reflection to create a new mega-page of
-	// that type.
-	pageType := reflect.TypeOf(firstPage)
-
-	// if it's a single page, just return the firstPage (first page)
-	if _, found := pageType.FieldByName("SinglePageBase"); found {
-		return firstPage, nil
-	}
-
-	// store the first page to avoid getting it twice
-	p.firstPage = firstPage
-
-	// Switch on the page body type. Recognized types are `map[string]interface{}`,
-	// `[]byte`, and `[]interface{}`.
-	switch pb := firstPage.GetBody().(type) {
-	case map[string]interface{}:
-		// key is the map key for the page body if the body type is `map[string]interface{}`.
-		var key string
-		// Iterate over the pages to concatenate the bodies.
-		err = p.EachPage(func(page Page) (bool, error) {
-			b := page.GetBody().(map[string]interface{})
-			for k, v := range b {
-				// If it's a linked page, we don't want the `links`, we want the other one.
-				if !strings.HasSuffix(k, "links") {
-					// check the field's type. we only want []interface{} (which is really []map[string]interface{})
-					switch vt := v.(type) {
-					case []interface{}:
-						key = k
-						pagesSlice = append(pagesSlice, vt...)
-					}
-				}
-			}
-			return true, nil
-		})
-		if err != nil {
-			return nil, err
-		}
-		// Set body to value of type `map[string]interface{}`
-		body = reflect.MakeMap(reflect.MapOf(reflect.TypeOf(key), reflect.TypeOf(pagesSlice)))
-		body.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(pagesSlice))
-	case []byte:
-		// Iterate over the pages to concatenate the bodies.
-		err = p.EachPage(func(page Page) (bool, error) {
-			b := page.GetBody().([]byte)
-			pagesSlice = append(pagesSlice, b)
-			// separate pages with a newline
-			pagesSlice = append(pagesSlice, []byte{10})
-			return true, nil
-		})
-		if err != nil {
-			return nil, err
-		}
-		if len(pagesSlice) > 0 {
-			// Remove the trailing newline.
-			pagesSlice = pagesSlice[:len(pagesSlice)-1]
-		}
-		var b []byte
-		// Combine the slice of slices into a single slice.
-		for _, slice := range pagesSlice {
-			b = append(b, slice.([]byte)...)
-		}
-		// Set body to value of type `bytes`.
-		body = reflect.New(reflect.TypeOf(b)).Elem()
-		body.SetBytes(b)
-	case []interface{}:
-		// Iterate over the pages to concatenate the bodies.
-		err = p.EachPage(func(page Page) (bool, error) {
-			b := page.GetBody().([]interface{})
-			pagesSlice = append(pagesSlice, b...)
-			return true, nil
-		})
-		if err != nil {
-			return nil, err
-		}
-		// Set body to value of type `[]interface{}`
-		body = reflect.MakeSlice(reflect.TypeOf(pagesSlice), len(pagesSlice), len(pagesSlice))
-		for i, s := range pagesSlice {
-			body.Index(i).Set(reflect.ValueOf(s))
-		}
-	default:
-		err := gophercloud.ErrUnexpectedType{}
-		err.Expected = "map[string]interface{}/[]byte/[]interface{}"
-		err.Actual = fmt.Sprintf("%T", pb)
-		return nil, err
-	}
-
-	// Each `Extract*` function is expecting a specific type of page coming back,
-	// otherwise the type assertion in those functions will fail. pageType is needed
-	// to create a type in this method that has the same type that the `Extract*`
-	// function is expecting and set the Body of that object to the concatenated
-	// pages.
-	page := reflect.New(pageType)
-	// Set the page body to be the concatenated pages.
-	page.Elem().FieldByName("Body").Set(body)
-	// Set any additional headers that were passed along. The `objectstorage` package,
-	// for example, passes a Content-Type header.
-	h := make(http.Header)
-	for k, v := range p.Headers {
-		h.Add(k, v)
-	}
-	page.Elem().FieldByName("Header").Set(reflect.ValueOf(h))
-	// Type assert the page to a Page interface so that the type assertion in the
-	// `Extract*` methods will work.
-	return page.Elem().Interface().(Page), err
-}
diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go b/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go
deleted file mode 100644
index 912daea3642..00000000000
--- a/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
-Package pagination contains utilities and convenience structs that implement common pagination idioms within OpenStack APIs.
-*/
-package pagination
diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/single.go b/vendor/github.com/gophercloud/gophercloud/pagination/single.go
deleted file mode 100644
index 4251d6491ef..00000000000
--- a/vendor/github.com/gophercloud/gophercloud/pagination/single.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package pagination
-
-import (
-	"fmt"
-	"reflect"
-
-	"github.com/gophercloud/gophercloud"
-)
-
-// SinglePageBase may be embedded in a Page that contains all of the results from an operation at once.
-type SinglePageBase PageResult
-
-// NextPageURL always returns "" to indicate that there are no more pages to return.
-func (current SinglePageBase) NextPageURL() (string, error) {
-	return "", nil
-}
-
-// IsEmpty satisfies the IsEmpty method of the Page interface
-func (current SinglePageBase) IsEmpty() (bool, error) {
-	if b, ok := current.Body.([]interface{}); ok {
-		return len(b) == 0, nil
-	}
-	err := gophercloud.ErrUnexpectedType{}
-	err.Expected = "[]interface{}"
-	err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body))
-	return true, err
-}
-
-// GetBody returns the single page's body. This method is needed to satisfy the
-// Page interface.
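For contrast with per-page iteration, a caller-side sketch of AllPages; the flavorPage type and listURL are again hypothetical. Single-page collections like this one short-circuit the reflection machinery above and return the first page directly:

```go
package example

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/pagination"
)

// flavorPage embeds SinglePageBase, so AllPages returns the first (and only)
// page without entering the body-concatenation switch.
type flavorPage struct {
	pagination.SinglePageBase
}

func fetchAll(client *gophercloud.ServiceClient, listURL string) error {
	pager := pagination.NewPager(client, listURL, func(r pagination.PageResult) pagination.Page {
		return flavorPage{pagination.SinglePageBase(r)}
	})

	page, err := pager.AllPages()
	if err != nil {
		return err
	}
	fmt.Printf("combined body: %+v\n", page.GetBody())
	return nil
}
```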
-func (current SinglePageBase) GetBody() interface{} { - return current.Body -} diff --git a/vendor/github.com/gophercloud/gophercloud/params.go b/vendor/github.com/gophercloud/gophercloud/params.go deleted file mode 100644 index 219c020a240..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/params.go +++ /dev/null @@ -1,493 +0,0 @@ -package gophercloud - -import ( - "encoding/json" - "fmt" - "net/url" - "reflect" - "strconv" - "strings" - "time" -) - -/* -BuildRequestBody builds a map[string]interface from the given `struct`. If -parent is not an empty string, the final map[string]interface returned will -encapsulate the built one. For example: - - disk := 1 - createOpts := flavors.CreateOpts{ - ID: "1", - Name: "m1.tiny", - Disk: &disk, - RAM: 512, - VCPUs: 1, - RxTxFactor: 1.0, - } - - body, err := gophercloud.BuildRequestBody(createOpts, "flavor") - -The above example can be run as-is, however it is recommended to look at how -BuildRequestBody is used within Gophercloud to more fully understand how it -fits within the request process as a whole rather than use it directly as shown -above. -*/ -func BuildRequestBody(opts interface{}, parent string) (map[string]interface{}, error) { - optsValue := reflect.ValueOf(opts) - if optsValue.Kind() == reflect.Ptr { - optsValue = optsValue.Elem() - } - - optsType := reflect.TypeOf(opts) - if optsType.Kind() == reflect.Ptr { - optsType = optsType.Elem() - } - - optsMap := make(map[string]interface{}) - if optsValue.Kind() == reflect.Struct { - //fmt.Printf("optsValue.Kind() is a reflect.Struct: %+v\n", optsValue.Kind()) - for i := 0; i < optsValue.NumField(); i++ { - v := optsValue.Field(i) - f := optsType.Field(i) - - if f.Name != strings.Title(f.Name) { - //fmt.Printf("Skipping field: %s...\n", f.Name) - continue - } - - //fmt.Printf("Starting on field: %s...\n", f.Name) - - zero := isZero(v) - //fmt.Printf("v is zero?: %v\n", zero) - - // if the field has a required tag that's set to "true" - if requiredTag := f.Tag.Get("required"); requiredTag == "true" { - //fmt.Printf("Checking required field [%s]:\n\tv: %+v\n\tisZero:%v\n", f.Name, v.Interface(), zero) - // if the field's value is zero, return a missing-argument error - if zero { - // if the field has a 'required' tag, it can't have a zero-value - err := ErrMissingInput{} - err.Argument = f.Name - return nil, err - } - } - - if xorTag := f.Tag.Get("xor"); xorTag != "" { - //fmt.Printf("Checking `xor` tag for field [%s] with value %+v:\n\txorTag: %s\n", f.Name, v, xorTag) - xorField := optsValue.FieldByName(xorTag) - var xorFieldIsZero bool - if reflect.ValueOf(xorField.Interface()) == reflect.Zero(xorField.Type()) { - xorFieldIsZero = true - } else { - if xorField.Kind() == reflect.Ptr { - xorField = xorField.Elem() - } - xorFieldIsZero = isZero(xorField) - } - if !(zero != xorFieldIsZero) { - err := ErrMissingInput{} - err.Argument = fmt.Sprintf("%s/%s", f.Name, xorTag) - err.Info = fmt.Sprintf("Exactly one of %s and %s must be provided", f.Name, xorTag) - return nil, err - } - } - - if orTag := f.Tag.Get("or"); orTag != "" { - //fmt.Printf("Checking `or` tag for field with:\n\tname: %+v\n\torTag:%s\n", f.Name, orTag) - //fmt.Printf("field is zero?: %v\n", zero) - if zero { - orField := optsValue.FieldByName(orTag) - var orFieldIsZero bool - if reflect.ValueOf(orField.Interface()) == reflect.Zero(orField.Type()) { - orFieldIsZero = true - } else { - if orField.Kind() == reflect.Ptr { - orField = orField.Elem() - } - orFieldIsZero = isZero(orField) - } - if orFieldIsZero { - err 
:= ErrMissingInput{} - err.Argument = fmt.Sprintf("%s/%s", f.Name, orTag) - err.Info = fmt.Sprintf("At least one of %s and %s must be provided", f.Name, orTag) - return nil, err - } - } - } - - jsonTag := f.Tag.Get("json") - if jsonTag == "-" { - continue - } - - if v.Kind() == reflect.Slice || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Slice) { - sliceValue := v - if sliceValue.Kind() == reflect.Ptr { - sliceValue = sliceValue.Elem() - } - - for i := 0; i < sliceValue.Len(); i++ { - element := sliceValue.Index(i) - if element.Kind() == reflect.Struct || (element.Kind() == reflect.Ptr && element.Elem().Kind() == reflect.Struct) { - _, err := BuildRequestBody(element.Interface(), "") - if err != nil { - return nil, err - } - } - } - } - if v.Kind() == reflect.Struct || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct) { - if zero { - //fmt.Printf("value before change: %+v\n", optsValue.Field(i)) - if jsonTag != "" { - jsonTagPieces := strings.Split(jsonTag, ",") - if len(jsonTagPieces) > 1 && jsonTagPieces[1] == "omitempty" { - if v.CanSet() { - if !v.IsNil() { - if v.Kind() == reflect.Ptr { - v.Set(reflect.Zero(v.Type())) - } - } - //fmt.Printf("value after change: %+v\n", optsValue.Field(i)) - } - } - } - continue - } - - //fmt.Printf("Calling BuildRequestBody with:\n\tv: %+v\n\tf.Name:%s\n", v.Interface(), f.Name) - _, err := BuildRequestBody(v.Interface(), f.Name) - if err != nil { - return nil, err - } - } - } - - //fmt.Printf("opts: %+v \n", opts) - - b, err := json.Marshal(opts) - if err != nil { - return nil, err - } - - //fmt.Printf("string(b): %s\n", string(b)) - - err = json.Unmarshal(b, &optsMap) - if err != nil { - return nil, err - } - - //fmt.Printf("optsMap: %+v\n", optsMap) - - if parent != "" { - optsMap = map[string]interface{}{parent: optsMap} - } - //fmt.Printf("optsMap after parent added: %+v\n", optsMap) - return optsMap, nil - } - // Return an error if the underlying type of 'opts' isn't a struct. - return nil, fmt.Errorf("Options type is not a struct.") -} - -// EnabledState is a convenience type, mostly used in Create and Update -// operations. Because the zero value of a bool is FALSE, we need to use a -// pointer instead to indicate zero-ness. -type EnabledState *bool - -// Convenience vars for EnabledState values. -var ( - iTrue = true - iFalse = false - - Enabled EnabledState = &iTrue - Disabled EnabledState = &iFalse -) - -// IPVersion is a type for the possible IP address versions. Valid instances -// are IPv4 and IPv6 -type IPVersion int - -const ( - // IPv4 is used for IP version 4 addresses - IPv4 IPVersion = 4 - // IPv6 is used for IP version 6 addresses - IPv6 IPVersion = 6 -) - -// IntToPointer is a function for converting integers into integer pointers. -// This is useful when passing in options to operations. -func IntToPointer(i int) *int { - return &i -} - -/* -MaybeString is an internal function to be used by request methods in individual -resource packages. - -It takes a string that might be a zero value and returns either a pointer to its -address or nil. This is useful for allowing users to conveniently omit values -from an options struct by leaving them zeroed, but still pass nil to the JSON -serializer so they'll be omitted from the request body. -*/ -func MaybeString(original string) *string { - if original != "" { - return &original - } - return nil -} - -/* -MaybeInt is an internal function to be used by request methods in individual -resource packages. 
- -Like MaybeString, it accepts an int that may or may not be a zero value, and -returns either a pointer to its address or nil. It's intended to hint that the -JSON serializer should omit its field. -*/ -func MaybeInt(original int) *int { - if original != 0 { - return &original - } - return nil -} - -/* -func isUnderlyingStructZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Ptr: - return isUnderlyingStructZero(v.Elem()) - default: - return isZero(v) - } -} -*/ - -var t time.Time - -func isZero(v reflect.Value) bool { - //fmt.Printf("\n\nchecking isZero for value: %+v\n", v) - switch v.Kind() { - case reflect.Ptr: - if v.IsNil() { - return true - } - return false - case reflect.Func, reflect.Map, reflect.Slice: - return v.IsNil() - case reflect.Array: - z := true - for i := 0; i < v.Len(); i++ { - z = z && isZero(v.Index(i)) - } - return z - case reflect.Struct: - if v.Type() == reflect.TypeOf(t) { - if v.Interface().(time.Time).IsZero() { - return true - } - return false - } - z := true - for i := 0; i < v.NumField(); i++ { - z = z && isZero(v.Field(i)) - } - return z - } - // Compare other types directly: - z := reflect.Zero(v.Type()) - //fmt.Printf("zero type for value: %+v\n\n\n", z) - return v.Interface() == z.Interface() -} - -/* -BuildQueryString is an internal function to be used by request methods in -individual resource packages. - -It accepts a tagged structure and expands it into a URL struct. Field names are -converted into query parameters based on a "q" tag. For example: - - type struct Something { - Bar string `q:"x_bar"` - Baz int `q:"lorem_ipsum"` - } - - instance := Something{ - Bar: "AAA", - Baz: "BBB", - } - -will be converted into "?x_bar=AAA&lorem_ipsum=BBB". - -The struct's fields may be strings, integers, or boolean values. Fields left at -their type's zero value will be omitted from the query. 
-*/ -func BuildQueryString(opts interface{}) (*url.URL, error) { - optsValue := reflect.ValueOf(opts) - if optsValue.Kind() == reflect.Ptr { - optsValue = optsValue.Elem() - } - - optsType := reflect.TypeOf(opts) - if optsType.Kind() == reflect.Ptr { - optsType = optsType.Elem() - } - - params := url.Values{} - - if optsValue.Kind() == reflect.Struct { - for i := 0; i < optsValue.NumField(); i++ { - v := optsValue.Field(i) - f := optsType.Field(i) - qTag := f.Tag.Get("q") - - // if the field has a 'q' tag, it goes in the query string - if qTag != "" { - tags := strings.Split(qTag, ",") - - // if the field is set, add it to the slice of query pieces - if !isZero(v) { - loop: - switch v.Kind() { - case reflect.Ptr: - v = v.Elem() - goto loop - case reflect.String: - params.Add(tags[0], v.String()) - case reflect.Int: - params.Add(tags[0], strconv.FormatInt(v.Int(), 10)) - case reflect.Bool: - params.Add(tags[0], strconv.FormatBool(v.Bool())) - case reflect.Slice: - switch v.Type().Elem() { - case reflect.TypeOf(0): - for i := 0; i < v.Len(); i++ { - params.Add(tags[0], strconv.FormatInt(v.Index(i).Int(), 10)) - } - default: - for i := 0; i < v.Len(); i++ { - params.Add(tags[0], v.Index(i).String()) - } - } - case reflect.Map: - if v.Type().Key().Kind() == reflect.String && v.Type().Elem().Kind() == reflect.String { - var s []string - for _, k := range v.MapKeys() { - value := v.MapIndex(k).String() - s = append(s, fmt.Sprintf("'%s':'%s'", k.String(), value)) - } - params.Add(tags[0], fmt.Sprintf("{%s}", strings.Join(s, ", "))) - } - } - } else { - // if the field has a 'required' tag, it can't have a zero-value - if requiredTag := f.Tag.Get("required"); requiredTag == "true" { - return &url.URL{}, fmt.Errorf("Required query parameter [%s] not set.", f.Name) - } - } - } - } - - return &url.URL{RawQuery: params.Encode()}, nil - } - // Return an error if the underlying type of 'opts' isn't a struct. - return nil, fmt.Errorf("Options type is not a struct.") -} - -/* -BuildHeaders is an internal function to be used by request methods in -individual resource packages. - -It accepts an arbitrary tagged structure and produces a string map that's -suitable for use as the HTTP headers of an outgoing request. Field names are -mapped to header names based in "h" tags. - - type struct Something { - Bar string `h:"x_bar"` - Baz int `h:"lorem_ipsum"` - } - - instance := Something{ - Bar: "AAA", - Baz: "BBB", - } - -will be converted into: - - map[string]string{ - "x_bar": "AAA", - "lorem_ipsum": "BBB", - } - -Untagged fields and fields left at their zero values are skipped. Integers, -booleans and string values are supported. 
-*/ -func BuildHeaders(opts interface{}) (map[string]string, error) { - optsValue := reflect.ValueOf(opts) - if optsValue.Kind() == reflect.Ptr { - optsValue = optsValue.Elem() - } - - optsType := reflect.TypeOf(opts) - if optsType.Kind() == reflect.Ptr { - optsType = optsType.Elem() - } - - optsMap := make(map[string]string) - if optsValue.Kind() == reflect.Struct { - for i := 0; i < optsValue.NumField(); i++ { - v := optsValue.Field(i) - f := optsType.Field(i) - hTag := f.Tag.Get("h") - - // if the field has a 'h' tag, it goes in the header - if hTag != "" { - tags := strings.Split(hTag, ",") - - // if the field is set, add it to the slice of query pieces - if !isZero(v) { - switch v.Kind() { - case reflect.String: - optsMap[tags[0]] = v.String() - case reflect.Int: - optsMap[tags[0]] = strconv.FormatInt(v.Int(), 10) - case reflect.Int64: - optsMap[tags[0]] = strconv.FormatInt(v.Int(), 10) - case reflect.Bool: - optsMap[tags[0]] = strconv.FormatBool(v.Bool()) - } - } else { - // if the field has a 'required' tag, it can't have a zero-value - if requiredTag := f.Tag.Get("required"); requiredTag == "true" { - return optsMap, fmt.Errorf("Required header [%s] not set.", f.Name) - } - } - } - - } - return optsMap, nil - } - // Return an error if the underlying type of 'opts' isn't a struct. - return optsMap, fmt.Errorf("Options type is not a struct.") -} - -// IDSliceToQueryString takes a slice of elements and converts them into a query -// string. For example, if name=foo and slice=[]int{20, 40, 60}, then the -// result would be `?name=20&name=40&name=60' -func IDSliceToQueryString(name string, ids []int) string { - str := "" - for k, v := range ids { - if k == 0 { - str += "?" - } else { - str += "&" - } - str += fmt.Sprintf("%s=%s", name, strconv.Itoa(v)) - } - return str -} - -// IntWithinRange returns TRUE if an integer falls within a defined range, and -// FALSE if not. -func IntWithinRange(val, min, max int) bool { - return val > min && val < max -} diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go deleted file mode 100644 index 53b3ecf27f5..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/provider_client.go +++ /dev/null @@ -1,566 +0,0 @@ -package gophercloud - -import ( - "bytes" - "context" - "encoding/json" - "errors" - "io" - "io/ioutil" - "net/http" - "strings" - "sync" -) - -// DefaultUserAgent is the default User-Agent string set in the request header. -const DefaultUserAgent = "gophercloud/2.0.0" - -// UserAgent represents a User-Agent header. -type UserAgent struct { - // prepend is the slice of User-Agent strings to prepend to DefaultUserAgent. - // All the strings to prepend are accumulated and prepended in the Join method. - prepend []string -} - -// Prepend prepends a user-defined string to the default User-Agent string. Users -// may pass in one or more strings to prepend. -func (ua *UserAgent) Prepend(s ...string) { - ua.prepend = append(s, ua.prepend...) -} - -// Join concatenates all the user-defined User-Agend strings with the default -// Gophercloud User-Agent string. -func (ua *UserAgent) Join() string { - uaSlice := append(ua.prepend, DefaultUserAgent) - return strings.Join(uaSlice, " ") -} - -// ProviderClient stores details that are required to interact with any -// services within a specific provider's API. 
-// -// Generally, you acquire a ProviderClient by calling the NewClient method in -// the appropriate provider's child package, providing whatever authentication -// credentials are required. -type ProviderClient struct { - // IdentityBase is the base URL used for a particular provider's identity - // service - it will be used when issuing authenticatation requests. It - // should point to the root resource of the identity service, not a specific - // identity version. - IdentityBase string - - // IdentityEndpoint is the identity endpoint. This may be a specific version - // of the identity service. If this is the case, this endpoint is used rather - // than querying versions first. - IdentityEndpoint string - - // TokenID is the ID of the most recently issued valid token. - // NOTE: Aside from within a custom ReauthFunc, this field shouldn't be set by an application. - // To safely read or write this value, call `Token` or `SetToken`, respectively - TokenID string - - // EndpointLocator describes how this provider discovers the endpoints for - // its constituent services. - EndpointLocator EndpointLocator - - // HTTPClient allows users to interject arbitrary http, https, or other transit behaviors. - HTTPClient http.Client - - // UserAgent represents the User-Agent header in the HTTP request. - UserAgent UserAgent - - // ReauthFunc is the function used to re-authenticate the user if the request - // fails with a 401 HTTP response code. This a needed because there may be multiple - // authentication functions for different Identity service versions. - ReauthFunc func() error - - // Throwaway determines whether if this client is a throw-away client. It's a copy of user's provider client - // with the token and reauth func zeroed. Such client can be used to perform reauthorization. - Throwaway bool - - // Context is the context passed to the HTTP request. - Context context.Context - - // mut is a mutex for the client. It protects read and write access to client attributes such as getting - // and setting the TokenID. - mut *sync.RWMutex - - // reauthmut is a mutex for reauthentication it attempts to ensure that only one reauthentication - // attempt happens at one time. - reauthmut *reauthlock - - authResult AuthResult -} - -// reauthlock represents a set of attributes used to help in the reauthentication process. -type reauthlock struct { - sync.RWMutex - ongoing *reauthFuture -} - -// reauthFuture represents future result of the reauthentication process. -// while done channel is not closed, reauthentication is in progress. -// when done channel is closed, err contains the result of reauthentication. -type reauthFuture struct { - done chan struct{} - err error -} - -func newReauthFuture() *reauthFuture { - return &reauthFuture{ - make(chan struct{}), - nil, - } -} - -func (f *reauthFuture) Set(err error) { - f.err = err - close(f.done) -} - -func (f *reauthFuture) Get() error { - <-f.done - return f.err -} - -// AuthenticatedHeaders returns a map of HTTP headers that are common for all -// authenticated service requests. Blocks if Reauthenticate is in progress. -func (client *ProviderClient) AuthenticatedHeaders() (m map[string]string) { - if client.IsThrowaway() { - return - } - if client.reauthmut != nil { - // If a Reauthenticate is in progress, wait for it to complete. 
- client.reauthmut.Lock() - ongoing := client.reauthmut.ongoing - client.reauthmut.Unlock() - if ongoing != nil { - _ = ongoing.Get() - } - } - t := client.Token() - if t == "" { - return - } - return map[string]string{"X-Auth-Token": t} -} - -// UseTokenLock creates a mutex that is used to allow safe concurrent access to the auth token. -// If the application's ProviderClient is not used concurrently, this doesn't need to be called. -func (client *ProviderClient) UseTokenLock() { - client.mut = new(sync.RWMutex) - client.reauthmut = new(reauthlock) -} - -// GetAuthResult returns the result from the request that was used to obtain a -// provider client's Keystone token. -// -// The result is nil when authentication has not yet taken place, when the token -// was set manually with SetToken(), or when a ReauthFunc was used that does not -// record the AuthResult. -func (client *ProviderClient) GetAuthResult() AuthResult { - if client.mut != nil { - client.mut.RLock() - defer client.mut.RUnlock() - } - return client.authResult -} - -// Token safely reads the value of the auth token from the ProviderClient. Applications should -// call this method to access the token instead of the TokenID field -func (client *ProviderClient) Token() string { - if client.mut != nil { - client.mut.RLock() - defer client.mut.RUnlock() - } - return client.TokenID -} - -// SetToken safely sets the value of the auth token in the ProviderClient. Applications may -// use this method in a custom ReauthFunc. -// -// WARNING: This function is deprecated. Use SetTokenAndAuthResult() instead. -func (client *ProviderClient) SetToken(t string) { - if client.mut != nil { - client.mut.Lock() - defer client.mut.Unlock() - } - client.TokenID = t - client.authResult = nil -} - -// SetTokenAndAuthResult safely sets the value of the auth token in the -// ProviderClient and also records the AuthResult that was returned from the -// token creation request. Applications may call this in a custom ReauthFunc. -func (client *ProviderClient) SetTokenAndAuthResult(r AuthResult) error { - tokenID := "" - var err error - if r != nil { - tokenID, err = r.ExtractTokenID() - if err != nil { - return err - } - } - - if client.mut != nil { - client.mut.Lock() - defer client.mut.Unlock() - } - client.TokenID = tokenID - client.authResult = r - return nil -} - -// CopyTokenFrom safely copies the token from another ProviderClient into the -// this one. -func (client *ProviderClient) CopyTokenFrom(other *ProviderClient) { - if client.mut != nil { - client.mut.Lock() - defer client.mut.Unlock() - } - if other.mut != nil && other.mut != client.mut { - other.mut.RLock() - defer other.mut.RUnlock() - } - client.TokenID = other.TokenID - client.authResult = other.authResult -} - -// IsThrowaway safely reads the value of the client Throwaway field. -func (client *ProviderClient) IsThrowaway() bool { - if client.reauthmut != nil { - client.reauthmut.RLock() - defer client.reauthmut.RUnlock() - } - return client.Throwaway -} - -// SetThrowaway safely sets the value of the client Throwaway field. -func (client *ProviderClient) SetThrowaway(v bool) { - if client.reauthmut != nil { - client.reauthmut.Lock() - defer client.reauthmut.Unlock() - } - client.Throwaway = v -} - -// Reauthenticate calls client.ReauthFunc in a thread-safe way. If this is -// called because of a 401 response, the caller may pass the previous token. In -// this case, the reauthentication can be skipped if another thread has already -// reauthenticated in the meantime. 
If no previous token is known, an empty -// string should be passed instead to force unconditional reauthentication. -func (client *ProviderClient) Reauthenticate(previousToken string) error { - if client.ReauthFunc == nil { - return nil - } - - if client.reauthmut == nil { - return client.ReauthFunc() - } - - future := newReauthFuture() - - // Check if a Reauthenticate is in progress, or start one if not. - client.reauthmut.Lock() - ongoing := client.reauthmut.ongoing - if ongoing == nil { - client.reauthmut.ongoing = future - } - client.reauthmut.Unlock() - - // If Reauthenticate is running elsewhere, wait for its result. - if ongoing != nil { - return ongoing.Get() - } - - // Perform the actual reauthentication. - var err error - if previousToken == "" || client.TokenID == previousToken { - err = client.ReauthFunc() - } else { - err = nil - } - - // Mark Reauthenticate as finished. - client.reauthmut.Lock() - client.reauthmut.ongoing.Set(err) - client.reauthmut.ongoing = nil - client.reauthmut.Unlock() - - return err -} - -// RequestOpts customizes the behavior of the provider.Request() method. -type RequestOpts struct { - // JSONBody, if provided, will be encoded as JSON and used as the body of the HTTP request. The - // content type of the request will default to "application/json" unless overridden by MoreHeaders. - // It's an error to specify both a JSONBody and a RawBody. - JSONBody interface{} - // RawBody contains an io.Reader that will be consumed by the request directly. No content-type - // will be set unless one is provided explicitly by MoreHeaders. - RawBody io.Reader - // JSONResponse, if provided, will be populated with the contents of the response body parsed as - // JSON. - JSONResponse interface{} - // OkCodes contains a list of numeric HTTP status codes that should be interpreted as success. If - // the response has a different code, an error will be returned. - OkCodes []int - // MoreHeaders specifies additional HTTP headers to be provide on the request. If a header is - // provided with a blank value (""), that header will be *omitted* instead: use this to suppress - // the default Accept header or an inferred Content-Type, for example. - MoreHeaders map[string]string - // ErrorContext specifies the resource error type to return if an error is encountered. - // This lets resources override default error messages based on the response status code. - ErrorContext error - // KeepResponseBody specifies whether to keep the HTTP response body. Usually used, when the HTTP - // response body is considered for further use. Valid when JSONResponse is nil. - KeepResponseBody bool -} - -// requestState contains temporary state for a single ProviderClient.Request() call. -type requestState struct { - // This flag indicates if we have reauthenticated during this request because of a 401 response. - // It ensures that we don't reauthenticate multiple times for a single request. If we - // reauthenticate, but keep getting 401 responses with the fresh token, reauthenticating some more - // will just get us into an infinite loop. - hasReauthenticated bool -} - -var applicationJSON = "application/json" - -// Request performs an HTTP request using the ProviderClient's current HTTPClient. An authentication -// header will automatically be provided. 
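A sketch of driving Request directly with the RequestOpts knobs documented above; the URL, payload shape, and expected status code are invented for illustration:

```go
package example

import "github.com/gophercloud/gophercloud"

func createWidget(pc *gophercloud.ProviderClient, url string) (map[string]interface{}, error) {
	var out map[string]interface{}
	// A non-201 status becomes an ErrUnexpectedResponseCode (or one of the
	// ErrDefaultNNN wrappers) via the OkCodes check.
	_, err := pc.Request("POST", url, &gophercloud.RequestOpts{
		JSONBody:     map[string]interface{}{"widget": map[string]string{"name": "demo"}},
		JSONResponse: &out,
		OkCodes:      []int{201},
	})
	return out, err
}
```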
-func (client *ProviderClient) Request(method, url string, options *RequestOpts) (*http.Response, error) { - return client.doRequest(method, url, options, &requestState{ - hasReauthenticated: false, - }) -} - -func (client *ProviderClient) doRequest(method, url string, options *RequestOpts, state *requestState) (*http.Response, error) { - var body io.Reader - var contentType *string - - // Derive the content body by either encoding an arbitrary object as JSON, or by taking a provided - // io.ReadSeeker as-is. Default the content-type to application/json. - if options.JSONBody != nil { - if options.RawBody != nil { - return nil, errors.New("please provide only one of JSONBody or RawBody to gophercloud.Request()") - } - - rendered, err := json.Marshal(options.JSONBody) - if err != nil { - return nil, err - } - - body = bytes.NewReader(rendered) - contentType = &applicationJSON - } - - // Return an error, when "KeepResponseBody" is true and "JSONResponse" is not nil - if options.KeepResponseBody && options.JSONResponse != nil { - return nil, errors.New("cannot use KeepResponseBody when JSONResponse is not nil") - } - - if options.RawBody != nil { - body = options.RawBody - } - - // Construct the http.Request. - req, err := http.NewRequest(method, url, body) - if err != nil { - return nil, err - } - if client.Context != nil { - req = req.WithContext(client.Context) - } - - // Populate the request headers. Apply options.MoreHeaders last, to give the caller the chance to - // modify or omit any header. - if contentType != nil { - req.Header.Set("Content-Type", *contentType) - } - req.Header.Set("Accept", applicationJSON) - - // Set the User-Agent header - req.Header.Set("User-Agent", client.UserAgent.Join()) - - if options.MoreHeaders != nil { - for k, v := range options.MoreHeaders { - if v != "" { - req.Header.Set(k, v) - } else { - req.Header.Del(k) - } - } - } - - // get latest token from client - for k, v := range client.AuthenticatedHeaders() { - req.Header.Set(k, v) - } - - prereqtok := req.Header.Get("X-Auth-Token") - - // Issue the request. - resp, err := client.HTTPClient.Do(req) - if err != nil { - return nil, err - } - - // Allow default OkCodes if none explicitly set - okc := options.OkCodes - if okc == nil { - okc = defaultOkCodes(method) - } - - // Validate the HTTP response status. 
- var ok bool - for _, code := range okc { - if resp.StatusCode == code { - ok = true - break - } - } - - if !ok { - body, _ := ioutil.ReadAll(resp.Body) - resp.Body.Close() - respErr := ErrUnexpectedResponseCode{ - URL: url, - Method: method, - Expected: options.OkCodes, - Actual: resp.StatusCode, - Body: body, - ResponseHeader: resp.Header, - } - - errType := options.ErrorContext - switch resp.StatusCode { - case http.StatusBadRequest: - err = ErrDefault400{respErr} - if error400er, ok := errType.(Err400er); ok { - err = error400er.Error400(respErr) - } - case http.StatusUnauthorized: - if client.ReauthFunc != nil && !state.hasReauthenticated { - err = client.Reauthenticate(prereqtok) - if err != nil { - e := &ErrUnableToReauthenticate{} - e.ErrOriginal = respErr - return nil, e - } - if options.RawBody != nil { - if seeker, ok := options.RawBody.(io.Seeker); ok { - seeker.Seek(0, 0) - } - } - state.hasReauthenticated = true - resp, err = client.doRequest(method, url, options, state) - if err != nil { - switch err.(type) { - case *ErrUnexpectedResponseCode: - e := &ErrErrorAfterReauthentication{} - e.ErrOriginal = err.(*ErrUnexpectedResponseCode) - return nil, e - default: - e := &ErrErrorAfterReauthentication{} - e.ErrOriginal = err - return nil, e - } - } - return resp, nil - } - err = ErrDefault401{respErr} - if error401er, ok := errType.(Err401er); ok { - err = error401er.Error401(respErr) - } - case http.StatusForbidden: - err = ErrDefault403{respErr} - if error403er, ok := errType.(Err403er); ok { - err = error403er.Error403(respErr) - } - case http.StatusNotFound: - err = ErrDefault404{respErr} - if error404er, ok := errType.(Err404er); ok { - err = error404er.Error404(respErr) - } - case http.StatusMethodNotAllowed: - err = ErrDefault405{respErr} - if error405er, ok := errType.(Err405er); ok { - err = error405er.Error405(respErr) - } - case http.StatusRequestTimeout: - err = ErrDefault408{respErr} - if error408er, ok := errType.(Err408er); ok { - err = error408er.Error408(respErr) - } - case http.StatusConflict: - err = ErrDefault409{respErr} - if error409er, ok := errType.(Err409er); ok { - err = error409er.Error409(respErr) - } - case 429: - err = ErrDefault429{respErr} - if error429er, ok := errType.(Err429er); ok { - err = error429er.Error429(respErr) - } - case http.StatusInternalServerError: - err = ErrDefault500{respErr} - if error500er, ok := errType.(Err500er); ok { - err = error500er.Error500(respErr) - } - case http.StatusServiceUnavailable: - err = ErrDefault503{respErr} - if error503er, ok := errType.(Err503er); ok { - err = error503er.Error503(respErr) - } - } - - if err == nil { - err = respErr - } - - return resp, err - } - - // Parse the response body as JSON, if requested to do so. 
- if options.JSONResponse != nil { - defer resp.Body.Close() - // Don't decode JSON when there is no content - if resp.StatusCode == http.StatusNoContent { - // read till EOF, otherwise the connection will be closed and cannot be reused - _, err = io.Copy(ioutil.Discard, resp.Body) - return resp, err - } - if err := json.NewDecoder(resp.Body).Decode(options.JSONResponse); err != nil { - return nil, err - } - } - - // Close unused body to allow the HTTP connection to be reused - if !options.KeepResponseBody && options.JSONResponse == nil { - defer resp.Body.Close() - // read till EOF, otherwise the connection will be closed and cannot be reused - if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { - return nil, err - } - } - - return resp, nil -} - -func defaultOkCodes(method string) []int { - switch method { - case "GET", "HEAD": - return []int{200} - case "POST": - return []int{201, 202} - case "PUT": - return []int{201, 202} - case "PATCH": - return []int{200, 202, 204} - case "DELETE": - return []int{202, 204} - } - - return []int{} -} diff --git a/vendor/github.com/gophercloud/gophercloud/results.go b/vendor/github.com/gophercloud/gophercloud/results.go deleted file mode 100644 index 1b608103b7f..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/results.go +++ /dev/null @@ -1,460 +0,0 @@ -package gophercloud - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "reflect" - "strconv" - "time" -) - -/* -Result is an internal type to be used by individual resource packages, but its -methods will be available on a wide variety of user-facing embedding types. - -It acts as a base struct that other Result types, returned from request -functions, can embed for convenience. All Results capture basic information -from the HTTP transaction that was performed, including the response body, -HTTP headers, and any errors that happened. - -Generally, each Result type will have an Extract method that can be used to -further interpret the result's payload in a specific context. Extensions or -providers can then provide additional extraction functions to pull out -provider- or extension-specific information as well. -*/ -type Result struct { - // Body is the payload of the HTTP response from the server. In most cases, - // this will be the deserialized JSON structure. - Body interface{} - - // Header contains the HTTP header structure from the original response. - Header http.Header - - // Err is an error that occurred during the operation. It's deferred until - // extraction to make it easier to chain the Extract call. - Err error -} - -// ExtractInto allows users to provide an object into which `Extract` will extract -// the `Result.Body`. This would be useful for OpenStack providers that have -// different fields in the response object than OpenStack proper. 
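Resource packages typically wrap ExtractInto in a typed Extract method. A sketch; the Server model and the "server" envelope key are illustrative:

```go
package example

import "github.com/gophercloud/gophercloud"

// Server is a hypothetical model for the JSON response body.
type Server struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

// GetResult embeds Result, as the resource-package result types do.
type GetResult struct {
	gophercloud.Result
}

// Extract interprets the body as a {"server": {...}} envelope.
func (r GetResult) Extract() (*Server, error) {
	var s struct {
		Server *Server `json:"server"`
	}
	err := r.ExtractInto(&s)
	return s.Server, err
}
```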
-func (r Result) ExtractInto(to interface{}) error { - if r.Err != nil { - return r.Err - } - - if reader, ok := r.Body.(io.Reader); ok { - if readCloser, ok := reader.(io.Closer); ok { - defer readCloser.Close() - } - return json.NewDecoder(reader).Decode(to) - } - - b, err := json.Marshal(r.Body) - if err != nil { - return err - } - err = json.Unmarshal(b, to) - - return err -} - -func (r Result) extractIntoPtr(to interface{}, label string) error { - if label == "" { - return r.ExtractInto(&to) - } - - var m map[string]interface{} - err := r.ExtractInto(&m) - if err != nil { - return err - } - - b, err := json.Marshal(m[label]) - if err != nil { - return err - } - - toValue := reflect.ValueOf(to) - if toValue.Kind() == reflect.Ptr { - toValue = toValue.Elem() - } - - switch toValue.Kind() { - case reflect.Slice: - typeOfV := toValue.Type().Elem() - if typeOfV.Kind() == reflect.Struct { - if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { - newSlice := reflect.MakeSlice(reflect.SliceOf(typeOfV), 0, 0) - - if mSlice, ok := m[label].([]interface{}); ok { - for _, v := range mSlice { - // For each iteration of the slice, we create a new struct. - // This is to work around a bug where elements of a slice - // are reused and not overwritten when the same copy of the - // struct is used: - // - // https://github.com/golang/go/issues/21092 - // https://github.com/golang/go/issues/24155 - // https://play.golang.org/p/NHo3ywlPZli - newType := reflect.New(typeOfV).Elem() - - b, err := json.Marshal(v) - if err != nil { - return err - } - - // This is needed for structs with an UnmarshalJSON method. - // Technically this is just unmarshalling the response into - // a struct that is never used, but it's good enough to - // trigger the UnmarshalJSON method. - for i := 0; i < newType.NumField(); i++ { - s := newType.Field(i).Addr().Interface() - - // Unmarshal is used rather than NewDecoder to also work - // around the above-mentioned bug. - err = json.Unmarshal(b, s) - if err != nil { - return err - } - } - - newSlice = reflect.Append(newSlice, newType) - } - } - - // "to" should now be properly modeled to receive the - // JSON response body and unmarshal into all the correct - // fields of the struct or composed extension struct - // at the end of this method. - toValue.Set(newSlice) - - // jtopjian: This was put into place to resolve the issue - // described at - // https://github.com/gophercloud/gophercloud/issues/1963 - // - // This probably isn't the best fix, but it appears to - // be resolving the issue, so I'm going to implement it - // for now. - // - // For future readers, this entire case statement could - // use a review. - return nil - } - } - case reflect.Struct: - typeOfV := toValue.Type() - if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { - for i := 0; i < toValue.NumField(); i++ { - toField := toValue.Field(i) - if toField.Kind() == reflect.Struct { - s := toField.Addr().Interface() - err = json.NewDecoder(bytes.NewReader(b)).Decode(s) - if err != nil { - return err - } - } - } - } - } - - err = json.Unmarshal(b, &to) - return err -} - -// ExtractIntoStructPtr will unmarshal the Result (r) into the provided -// interface{} (to). -// -// NOTE: For internal use only -// -// `to` must be a pointer to an underlying struct type -// -// If provided, `label` will be filtered out of the response -// body prior to `r` being unmarshalled into `to`. 
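The label-filtering variants defined next are used the same way; a sketch with a hypothetical Flavor model and a "flavor" envelope key:

```go
package example

import "github.com/gophercloud/gophercloud"

type Flavor struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

// extractFlavor unwraps the struct nested under the "flavor" key before
// unmarshalling, which is what the label argument controls.
func extractFlavor(r gophercloud.Result) (*Flavor, error) {
	var f Flavor
	err := r.ExtractIntoStructPtr(&f, "flavor")
	return &f, err
}
```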
-func (r Result) ExtractIntoStructPtr(to interface{}, label string) error { - if r.Err != nil { - return r.Err - } - - t := reflect.TypeOf(to) - if k := t.Kind(); k != reflect.Ptr { - return fmt.Errorf("Expected pointer, got %v", k) - } - switch t.Elem().Kind() { - case reflect.Struct: - return r.extractIntoPtr(to, label) - default: - return fmt.Errorf("Expected pointer to struct, got: %v", t) - } -} - -// ExtractIntoSlicePtr will unmarshal the Result (r) into the provided -// interface{} (to). -// -// NOTE: For internal use only -// -// `to` must be a pointer to an underlying slice type -// -// If provided, `label` will be filtered out of the response -// body prior to `r` being unmarshalled into `to`. -func (r Result) ExtractIntoSlicePtr(to interface{}, label string) error { - if r.Err != nil { - return r.Err - } - - t := reflect.TypeOf(to) - if k := t.Kind(); k != reflect.Ptr { - return fmt.Errorf("Expected pointer, got %v", k) - } - switch t.Elem().Kind() { - case reflect.Slice: - return r.extractIntoPtr(to, label) - default: - return fmt.Errorf("Expected pointer to slice, got: %v", t) - } -} - -// PrettyPrintJSON creates a string containing the full response body as -// pretty-printed JSON. It's useful for capturing test fixtures and for -// debugging extraction bugs. If you include its output in an issue related to -// a buggy extraction function, we will all love you forever. -func (r Result) PrettyPrintJSON() string { - pretty, err := json.MarshalIndent(r.Body, "", " ") - if err != nil { - panic(err.Error()) - } - return string(pretty) -} - -// ErrResult is an internal type to be used by individual resource packages, but -// its methods will be available on a wide variety of user-facing embedding -// types. -// -// It represents results that only contain a potential error and -// nothing else. Usually, if the operation executed successfully, the Err field -// will be nil; otherwise it will be stocked with a relevant error. Use the -// ExtractErr method -// to cleanly pull it out. -type ErrResult struct { - Result -} - -// ExtractErr is a function that extracts error information, or nil, from a result. -func (r ErrResult) ExtractErr() error { - return r.Err -} - -/* -HeaderResult is an internal type to be used by individual resource packages, but -its methods will be available on a wide variety of user-facing embedding types. - -It represents a result that only contains an error (possibly nil) and an -http.Header. This is used, for example, by the objectstorage packages in -openstack, because most of the operations don't return response bodies, but do -have relevant information in headers. -*/ -type HeaderResult struct { - Result -} - -// ExtractInto allows users to provide an object into which `Extract` will -// extract the http.Header headers of the result. -func (r HeaderResult) ExtractInto(to interface{}) error { - if r.Err != nil { - return r.Err - } - - tmpHeaderMap := map[string]string{} - for k, v := range r.Header { - if len(v) > 0 { - tmpHeaderMap[k] = v[0] - } - } - - b, err := json.Marshal(tmpHeaderMap) - if err != nil { - return err - } - err = json.Unmarshal(b, to) - - return err -} - -// RFC3339Milli describes a common time format used by some API responses. 
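The custom time types that follow exist so response structs can unmarshal the assorted nonstandard timestamp formats OpenStack services emit. A sketch of one in use; the snapshot struct and field name are illustrative:

```go
package example

import (
	"encoding/json"
	"time"

	"github.com/gophercloud/gophercloud"
)

type snapshot struct {
	// Arrives as, e.g., "2021-02-08T12:43:43.123456Z".
	CreatedAt gophercloud.JSONRFC3339Milli `json:"created_at"`
}

func parseCreatedAt(raw []byte) (time.Time, error) {
	var s snapshot
	if err := json.Unmarshal(raw, &s); err != nil {
		return time.Time{}, err
	}
	return time.Time(s.CreatedAt), nil
}
```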
-const RFC3339Milli = "2006-01-02T15:04:05.999999Z" - -type JSONRFC3339Milli time.Time - -func (jt *JSONRFC3339Milli) UnmarshalJSON(data []byte) error { - b := bytes.NewBuffer(data) - dec := json.NewDecoder(b) - var s string - if err := dec.Decode(&s); err != nil { - return err - } - t, err := time.Parse(RFC3339Milli, s) - if err != nil { - return err - } - *jt = JSONRFC3339Milli(t) - return nil -} - -const RFC3339MilliNoZ = "2006-01-02T15:04:05.999999" - -type JSONRFC3339MilliNoZ time.Time - -func (jt *JSONRFC3339MilliNoZ) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(RFC3339MilliNoZ, s) - if err != nil { - return err - } - *jt = JSONRFC3339MilliNoZ(t) - return nil -} - -type JSONRFC1123 time.Time - -func (jt *JSONRFC1123) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(time.RFC1123, s) - if err != nil { - return err - } - *jt = JSONRFC1123(t) - return nil -} - -type JSONUnix time.Time - -func (jt *JSONUnix) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - unix, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return err - } - t = time.Unix(unix, 0) - *jt = JSONUnix(t) - return nil -} - -// RFC3339NoZ is the time format used in Heat (Orchestration). -const RFC3339NoZ = "2006-01-02T15:04:05" - -type JSONRFC3339NoZ time.Time - -func (jt *JSONRFC3339NoZ) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(RFC3339NoZ, s) - if err != nil { - return err - } - *jt = JSONRFC3339NoZ(t) - return nil -} - -// RFC3339ZNoT is the time format used in Zun (Containers Service). -const RFC3339ZNoT = "2006-01-02 15:04:05-07:00" - -type JSONRFC3339ZNoT time.Time - -func (jt *JSONRFC3339ZNoT) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(RFC3339ZNoT, s) - if err != nil { - return err - } - *jt = JSONRFC3339ZNoT(t) - return nil -} - -// RFC3339ZNoTNoZ is another time format used in Zun (Containers Service). -const RFC3339ZNoTNoZ = "2006-01-02 15:04:05" - -type JSONRFC3339ZNoTNoZ time.Time - -func (jt *JSONRFC3339ZNoTNoZ) UnmarshalJSON(data []byte) error { - var s string - if err := json.Unmarshal(data, &s); err != nil { - return err - } - if s == "" { - return nil - } - t, err := time.Parse(RFC3339ZNoTNoZ, s) - if err != nil { - return err - } - *jt = JSONRFC3339ZNoTNoZ(t) - return nil -} - -/* -Link is an internal type to be used in packages of collection resources that are -paginated in a certain way. - -It's a response substructure common to many paginated collection results that is -used to point to related pages. Usually, the one we care about is the one with -Rel field set to "next". -*/ -type Link struct { - Href string `json:"href"` - Rel string `json:"rel"` -} - -/* -ExtractNextURL is an internal function useful for packages of collection -resources that are paginated in a certain way. - -It attempts to extract the "next" URL from slice of Link structs, or -"" if no such URL is present. 
-*/ -func ExtractNextURL(links []Link) (string, error) { - var url string - - for _, l := range links { - if l.Rel == "next" { - url = l.Href - } - } - - if url == "" { - return "", nil - } - - return url, nil -} diff --git a/vendor/github.com/gophercloud/gophercloud/service_client.go b/vendor/github.com/gophercloud/gophercloud/service_client.go deleted file mode 100644 index dd54abe30ef..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/service_client.go +++ /dev/null @@ -1,162 +0,0 @@ -package gophercloud - -import ( - "io" - "net/http" - "strings" -) - -// ServiceClient stores details required to interact with a specific service API implemented by a provider. -// Generally, you'll acquire these by calling the appropriate `New` method on a ProviderClient. -type ServiceClient struct { - // ProviderClient is a reference to the provider that implements this service. - *ProviderClient - - // Endpoint is the base URL of the service's API, acquired from a service catalog. - // It MUST end with a /. - Endpoint string - - // ResourceBase is the base URL shared by the resources within a service's API. It should include - // the API version and, like Endpoint, MUST end with a / if set. If not set, the Endpoint is used - // as-is, instead. - ResourceBase string - - // This is the service client type (e.g. compute, sharev2). - // NOTE: FOR INTERNAL USE ONLY. DO NOT SET. GOPHERCLOUD WILL SET THIS. - // It is only exported because it gets set in a different package. - Type string - - // The microversion of the service to use. Set this to use a particular microversion. - Microversion string - - // MoreHeaders allows users (or Gophercloud) to set service-wide headers on requests. Put another way, - // values set in this field will be set on all the HTTP requests the service client sends. - MoreHeaders map[string]string -} - -// ResourceBaseURL returns the base URL of any resources used by this service. It MUST end with a /. -func (client *ServiceClient) ResourceBaseURL() string { - if client.ResourceBase != "" { - return client.ResourceBase - } - return client.Endpoint -} - -// ServiceURL constructs a URL for a resource belonging to this provider. -func (client *ServiceClient) ServiceURL(parts ...string) string { - return client.ResourceBaseURL() + strings.Join(parts, "/") -} - -func (client *ServiceClient) initReqOpts(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) { - if v, ok := (JSONBody).(io.Reader); ok { - opts.RawBody = v - } else if JSONBody != nil { - opts.JSONBody = JSONBody - } - - if JSONResponse != nil { - opts.JSONResponse = JSONResponse - } - - if opts.MoreHeaders == nil { - opts.MoreHeaders = make(map[string]string) - } - - if client.Microversion != "" { - client.setMicroversionHeader(opts) - } -} - -// Get calls `Request` with the "GET" HTTP verb. -func (client *ServiceClient) Get(url string, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, nil, JSONResponse, opts) - return client.Request("GET", url, opts) -} - -// Post calls `Request` with the "POST" HTTP verb. -func (client *ServiceClient) Post(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, JSONBody, JSONResponse, opts) - return client.Request("POST", url, opts) -} - -// Put calls `Request` with the "PUT" HTTP verb. 
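The verb helpers around this point all funnel into Request via initReqOpts. A typical caller-side sketch, with a made-up resource path:

```go
package example

import "github.com/gophercloud/gophercloud"

func getServer(sc *gophercloud.ServiceClient, id string) (map[string]interface{}, error) {
	var body map[string]interface{}
	// ServiceURL joins path parts onto ResourceBase (or Endpoint), both of
	// which must end in "/".
	url := sc.ServiceURL("servers", id)
	_, err := sc.Get(url, &body, nil)
	return body, err
}
```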
-func (client *ServiceClient) Put(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, JSONBody, JSONResponse, opts) - return client.Request("PUT", url, opts) -} - -// Patch calls `Request` with the "PATCH" HTTP verb. -func (client *ServiceClient) Patch(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, JSONBody, JSONResponse, opts) - return client.Request("PATCH", url, opts) -} - -// Delete calls `Request` with the "DELETE" HTTP verb. -func (client *ServiceClient) Delete(url string, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, nil, nil, opts) - return client.Request("DELETE", url, opts) -} - -// Head calls `Request` with the "HEAD" HTTP verb. -func (client *ServiceClient) Head(url string, opts *RequestOpts) (*http.Response, error) { - if opts == nil { - opts = new(RequestOpts) - } - client.initReqOpts(url, nil, nil, opts) - return client.Request("HEAD", url, opts) -} - -func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { - switch client.Type { - case "compute": - opts.MoreHeaders["X-OpenStack-Nova-API-Version"] = client.Microversion - case "sharev2": - opts.MoreHeaders["X-OpenStack-Manila-API-Version"] = client.Microversion - case "volume": - opts.MoreHeaders["X-OpenStack-Volume-API-Version"] = client.Microversion - case "baremetal": - opts.MoreHeaders["X-OpenStack-Ironic-API-Version"] = client.Microversion - case "baremetal-introspection": - opts.MoreHeaders["X-OpenStack-Ironic-Inspector-API-Version"] = client.Microversion - } - - if client.Type != "" { - opts.MoreHeaders["OpenStack-API-Version"] = client.Type + " " + client.Microversion - } -} - -// Request carries out the HTTP operation for the service client -func (client *ServiceClient) Request(method, url string, options *RequestOpts) (*http.Response, error) { - if len(client.MoreHeaders) > 0 { - if options == nil { - options = new(RequestOpts) - } - for k, v := range client.MoreHeaders { - options.MoreHeaders[k] = v - } - } - return client.ProviderClient.Request(method, url, options) -} - -// ParseResponse is a helper function to parse http.Response to constituents. -func ParseResponse(resp *http.Response, err error) (io.ReadCloser, http.Header, error) { - if resp != nil { - return resp.Body, resp.Header, err - } - return nil, nil, err -} diff --git a/vendor/github.com/gophercloud/gophercloud/util.go b/vendor/github.com/gophercloud/gophercloud/util.go deleted file mode 100644 index 68f9a5d3eca..00000000000 --- a/vendor/github.com/gophercloud/gophercloud/util.go +++ /dev/null @@ -1,102 +0,0 @@ -package gophercloud - -import ( - "fmt" - "net/url" - "path/filepath" - "strings" - "time" -) - -// WaitFor polls a predicate function, once per second, up to a timeout limit. -// This is useful to wait for a resource to transition to a certain state. -// To handle situations when the predicate might hang indefinitely, the -// predicate will be prematurely cancelled after the timeout. -// Resource packages will wrap this in a more convenient function that's -// specific to a certain resource, but it can also be useful on its own. 
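WaitFor's predicate contract is clearest from the calling side; the status source here is a hypothetical closure:

```go
package example

import "github.com/gophercloud/gophercloud"

// waitForActive polls until getStatus reports "ACTIVE", giving up after
// 300 seconds.
func waitForActive(getStatus func() (string, error)) error {
	return gophercloud.WaitFor(300, func() (bool, error) {
		status, err := getStatus()
		if err != nil {
			return false, err // a hard error aborts the wait
		}
		return status == "ACTIVE", nil
	})
}
```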
-func WaitFor(timeout int, predicate func() (bool, error)) error { - type WaitForResult struct { - Success bool - Error error - } - - start := time.Now().Unix() - - for { - // If a timeout is set, and that's been exceeded, shut it down. - if timeout >= 0 && time.Now().Unix()-start >= int64(timeout) { - return fmt.Errorf("A timeout occurred") - } - - time.Sleep(1 * time.Second) - - var result WaitForResult - ch := make(chan bool, 1) - go func() { - defer close(ch) - satisfied, err := predicate() - result.Success = satisfied - result.Error = err - }() - - select { - case <-ch: - if result.Error != nil { - return result.Error - } - if result.Success { - return nil - } - // If the predicate has not finished by the timeout, cancel it. - case <-time.After(time.Duration(timeout) * time.Second): - return fmt.Errorf("A timeout occurred") - } - } -} - -// NormalizeURL is an internal function to be used by provider clients. -// -// It ensures that each endpoint URL has a closing `/`, as expected by -// ServiceClient's methods. -func NormalizeURL(url string) string { - if !strings.HasSuffix(url, "/") { - return url + "/" - } - return url -} - -// NormalizePathURL is used to convert rawPath to a fqdn, using basePath as -// a reference in the filesystem, if necessary. basePath is assumed to contain -// either '.' when first used, or the file:// type fqdn of the parent resource. -// e.g. myFavScript.yaml => file://opt/lib/myFavScript.yaml -func NormalizePathURL(basePath, rawPath string) (string, error) { - u, err := url.Parse(rawPath) - if err != nil { - return "", err - } - // if a scheme is defined, it must be a fqdn already - if u.Scheme != "" { - return u.String(), nil - } - // if basePath is a url, then child resources are assumed to be relative to it - bu, err := url.Parse(basePath) - if err != nil { - return "", err - } - var basePathSys, absPathSys string - if bu.Scheme != "" { - basePathSys = filepath.FromSlash(bu.Path) - absPathSys = filepath.Join(basePathSys, rawPath) - bu.Path = filepath.ToSlash(absPathSys) - return bu.String(), nil - } - - absPathSys = filepath.Join(basePath, rawPath) - u.Path = filepath.ToSlash(absPathSys) - if err != nil { - return "", err - } - u.Scheme = "file" - return u.String(), nil - -} diff --git a/vendor/github.com/hashicorp/consul/api/agent.go b/vendor/github.com/hashicorp/consul/api/agent.go index 3e1136895f4..a4cc143f05b 100644 --- a/vendor/github.com/hashicorp/consul/api/agent.go +++ b/vendor/github.com/hashicorp/consul/api/agent.go @@ -93,6 +93,8 @@ type AgentService struct { // to include the Namespace in the hash. When we do, then we are in for lots of fun with tests. // For now though, ignoring it works well enough. Namespace string `json:",omitempty" bexpr:"-" hash:"ignore"` + // Datacenter is only ever returned and is ignored if presented. + Datacenter string `json:",omitempty" bexpr:"-" hash:"ignore"` } // AgentServiceChecksInfo returns information about a Service and its checks @@ -162,7 +164,7 @@ const ( // MemberTagKeyReadReplica is the key used to indicate that the member is a read // replica server (will remain a Raft non-voter). // Read Replicas are a Consul Enterprise feature. - MemberTagKeyReadReplica = "nonvoter" + MemberTagKeyReadReplica = "read_replica" // MemberTagValueReadReplica is the value of the MemberTagKeyReadReplica key when // the member is in fact a read-replica. Any other value indicates that it is not. // Read Replicas are a Consul Enterprise feature. 
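The retag above ("nonvoter" to "read_replica") matters to any caller that inspects member tags; code pinned to the old literal would silently stop matching. A sketch of the tag check using the constants from this file:

```go
package example

import "github.com/hashicorp/consul/api"

// isReadReplica reports whether a cluster member is tagged as a Consul
// Enterprise read replica under the renamed key.
func isReadReplica(m *api.AgentMember) bool {
	v, ok := m.Tags[api.MemberTagKeyReadReplica]
	return ok && v == api.MemberTagValueReadReplica
}
```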
@@ -187,10 +189,18 @@ const ( // AgentMember represents a cluster member known to the agent type AgentMember struct { - Name string - Addr string - Port uint16 - Tags map[string]string + Name string + Addr string + Port uint16 + Tags map[string]string + // Status of the Member which corresponds to github.com/hashicorp/serf/serf.MemberStatus + // Value is one of: + // + // AgentMemberNone = 0 + // AgentMemberAlive = 1 + // AgentMemberLeaving = 2 + // AgentMemberLeft = 3 + // AgentMemberFailed = 4 Status int ProtocolMin uint8 ProtocolMax uint8 diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index 38a4e98fbd2..08f00c40695 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -254,6 +254,11 @@ type QueryMeta struct { // CacheAge is set if request was ?cached and indicates how stale the cached // response is. CacheAge time.Duration + + // DefaultACLPolicy is used to control the ACL interaction when there is no + // defined policy. This can be "allow" which means ACLs are used to + // deny-list, or "deny" which means ACLs are allow-lists. + DefaultACLPolicy string } // WriteMeta is used to return meta data about a write @@ -305,7 +310,7 @@ type Config struct { TokenFile string // Namespace is the name of the namespace to send along for the request - // when no other Namespace ispresent in the QueryOptions + // when no other Namespace is present in the QueryOptions Namespace string TLSConfig TLSConfig @@ -962,6 +967,12 @@ func parseQueryMeta(resp *http.Response, q *QueryMeta) error { q.AddressTranslationEnabled = false } + // Parse X-Consul-Default-ACL-Policy + switch v := header.Get("X-Consul-Default-ACL-Policy"); v { + case "allow", "deny": + q.DefaultACLPolicy = v + } + // Parse Cache info if cacheStr := header.Get("X-Cache"); cacheStr != "" { q.CacheHit = strings.EqualFold(cacheStr, "HIT") diff --git a/vendor/github.com/hashicorp/consul/api/config_entry.go b/vendor/github.com/hashicorp/consul/api/config_entry.go index a234f6eb255..f5ef60e2949 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry.go @@ -7,6 +7,7 @@ import ( "io" "strconv" "strings" + "time" "github.com/mitchellh/mapstructure" ) @@ -19,6 +20,7 @@ const ( ServiceResolver string = "service-resolver" IngressGateway string = "ingress-gateway" TerminatingGateway string = "terminating-gateway" + ServiceIntentions string = "service-intentions" ProxyConfigGlobal string = "global" ) @@ -26,6 +28,8 @@ const ( type ConfigEntry interface { GetKind() string GetName() string + GetNamespace() string + GetMeta() map[string]string GetCreateIndex() uint64 GetModifyIndex() uint64 } @@ -108,6 +112,14 @@ func (s *ServiceConfigEntry) GetName() string { return s.Name } +func (s *ServiceConfigEntry) GetNamespace() string { + return s.Namespace +} + +func (s *ServiceConfigEntry) GetMeta() map[string]string { + return s.Meta +} + func (s *ServiceConfigEntry) GetCreateIndex() uint64 { return s.CreateIndex } @@ -136,6 +148,14 @@ func (p *ProxyConfigEntry) GetName() string { return p.Name } +func (p *ProxyConfigEntry) GetNamespace() string { + return p.Namespace +} + +func (p *ProxyConfigEntry) GetMeta() map[string]string { + return p.Meta +} + func (p *ProxyConfigEntry) GetCreateIndex() uint64 { return p.CreateIndex } @@ -160,6 +180,8 @@ func makeConfigEntry(kind, name string) (ConfigEntry, error) { return &IngressGatewayConfigEntry{Kind: kind, Name: name}, nil case 
TerminatingGateway: return &TerminatingGatewayConfigEntry{Kind: kind, Name: name}, nil + case ServiceIntentions: + return &ServiceIntentionsConfigEntry{Kind: kind, Name: name}, nil default: return nil, fmt.Errorf("invalid config entry kind: %s", kind) } @@ -202,7 +224,10 @@ func DecodeConfigEntry(raw map[string]interface{}) (ConfigEntry, error) { } decodeConf := &mapstructure.DecoderConfig{ - DecodeHook: mapstructure.StringToTimeDurationHookFunc(), + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToTimeHookFunc(time.RFC3339), + ), Result: &entry, WeaklyTypedInput: true, } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go index 209106339f6..5419292fede 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_discoverychain.go @@ -17,10 +17,12 @@ type ServiceRouterConfigEntry struct { ModifyIndex uint64 } -func (e *ServiceRouterConfigEntry) GetKind() string { return e.Kind } -func (e *ServiceRouterConfigEntry) GetName() string { return e.Name } -func (e *ServiceRouterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } -func (e *ServiceRouterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } +func (e *ServiceRouterConfigEntry) GetKind() string { return e.Kind } +func (e *ServiceRouterConfigEntry) GetName() string { return e.Name } +func (e *ServiceRouterConfigEntry) GetNamespace() string { return e.Namespace } +func (e *ServiceRouterConfigEntry) GetMeta() map[string]string { return e.Meta } +func (e *ServiceRouterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } +func (e *ServiceRouterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } type ServiceRoute struct { Match *ServiceRouteMatch `json:",omitempty"` @@ -117,10 +119,12 @@ type ServiceSplitterConfigEntry struct { ModifyIndex uint64 } -func (e *ServiceSplitterConfigEntry) GetKind() string { return e.Kind } -func (e *ServiceSplitterConfigEntry) GetName() string { return e.Name } -func (e *ServiceSplitterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } -func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } +func (e *ServiceSplitterConfigEntry) GetKind() string { return e.Kind } +func (e *ServiceSplitterConfigEntry) GetName() string { return e.Name } +func (e *ServiceSplitterConfigEntry) GetNamespace() string { return e.Namespace } +func (e *ServiceSplitterConfigEntry) GetMeta() map[string]string { return e.Meta } +func (e *ServiceSplitterConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } +func (e *ServiceSplitterConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } type ServiceSplit struct { Weight float32 @@ -140,6 +144,10 @@ type ServiceResolverConfigEntry struct { Failover map[string]ServiceResolverFailover `json:",omitempty"` ConnectTimeout time.Duration `json:",omitempty" alias:"connect_timeout"` + // LoadBalancer determines the load balancing policy and configuration for services + // issuing requests to this upstream service. 
+ LoadBalancer *LoadBalancer `json:",omitempty" alias:"load_balancer"` + Meta map[string]string `json:",omitempty"` CreateIndex uint64 ModifyIndex uint64 @@ -181,10 +189,12 @@ func (e *ServiceResolverConfigEntry) UnmarshalJSON(data []byte) error { return nil } -func (e *ServiceResolverConfigEntry) GetKind() string { return e.Kind } -func (e *ServiceResolverConfigEntry) GetName() string { return e.Name } -func (e *ServiceResolverConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } -func (e *ServiceResolverConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } +func (e *ServiceResolverConfigEntry) GetKind() string { return e.Kind } +func (e *ServiceResolverConfigEntry) GetName() string { return e.Name } +func (e *ServiceResolverConfigEntry) GetNamespace() string { return e.Namespace } +func (e *ServiceResolverConfigEntry) GetMeta() map[string]string { return e.Meta } +func (e *ServiceResolverConfigEntry) GetCreateIndex() uint64 { return e.CreateIndex } +func (e *ServiceResolverConfigEntry) GetModifyIndex() uint64 { return e.ModifyIndex } type ServiceResolverSubset struct { Filter string `json:",omitempty"` @@ -204,3 +214,76 @@ type ServiceResolverFailover struct { Namespace string `json:",omitempty"` Datacenters []string `json:",omitempty"` } + +// LoadBalancer determines the load balancing policy and configuration for services +// issuing requests to this upstream service. +type LoadBalancer struct { + // Policy is the load balancing policy used to select a host + Policy string `json:",omitempty"` + + // RingHashConfig contains configuration for the "ring_hash" policy type + RingHashConfig *RingHashConfig `json:",omitempty" alias:"ring_hash_config"` + + // LeastRequestConfig contains configuration for the "least_request" policy type + LeastRequestConfig *LeastRequestConfig `json:",omitempty" alias:"least_request_config"` + + // HashPolicies is a list of hash policies to use for hashing load balancing algorithms. + // Hash policies are evaluated individually and combined such that identical lists + // result in the same hash. + // If no hash policies are present, or none are successfully evaluated, + // then a random backend host will be selected. + HashPolicies []HashPolicy `json:",omitempty" alias:"hash_policies"` +} + +// RingHashConfig contains configuration for the "ring_hash" policy type +type RingHashConfig struct { + // MinimumRingSize determines the minimum number of entries in the hash ring + MinimumRingSize uint64 `json:",omitempty" alias:"minimum_ring_size"` + + // MaximumRingSize determines the maximum number of entries in the hash ring + MaximumRingSize uint64 `json:",omitempty" alias:"maximum_ring_size"` +} + +// LeastRequestConfig contains configuration for the "least_request" policy type +type LeastRequestConfig struct { + // ChoiceCount determines the number of random healthy hosts from which to select the one with the least requests. + ChoiceCount uint32 `json:",omitempty" alias:"choice_count"` +} + +// HashPolicy defines which attributes will be hashed by hash-based LB algorithms +type HashPolicy struct { + // Field is the attribute type to hash on. + // Must be one of "header","cookie", or "query_parameter". + // Cannot be specified along with SourceIP. + Field string `json:",omitempty"` + + // FieldValue is the value to hash. + // ie. header name, cookie name, URL query parameter name + // Cannot be specified along with SourceIP. 
+ FieldValue string `json:",omitempty" alias:"field_value"` + + // CookieConfig contains configuration for the "cookie" hash policy type. + CookieConfig *CookieConfig `json:",omitempty" alias:"cookie_config"` + + // SourceIP determines whether the hash should be of the source IP rather than of a field and field value. + // Cannot be specified along with Field or FieldValue. + SourceIP bool `json:",omitempty" alias:"source_ip"` + + // Terminal will short circuit the computation of the hash when multiple hash policies are present. + // If a hash is computed when a Terminal policy is evaluated, + // then that hash will be used and subsequent hash policies will be ignored. + Terminal bool `json:",omitempty"` +} + +// CookieConfig contains configuration for the "cookie" hash policy type. +// This is specified to have Envoy generate a cookie for a client on its first request. +type CookieConfig struct { + // Generates a session cookie with no expiration. + Session bool `json:",omitempty"` + + // TTL for generated cookies. Cannot be specified for session cookies. + TTL time.Duration `json:",omitempty"` + + // The path to set for the cookie + Path string `json:",omitempty"` +} diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go index e259427d860..822c093f2bd 100644 --- a/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go +++ b/vendor/github.com/hashicorp/consul/api/config_entry_gateways.go @@ -96,6 +96,14 @@ func (i *IngressGatewayConfigEntry) GetName() string { return i.Name } +func (i *IngressGatewayConfigEntry) GetNamespace() string { + return i.Namespace +} + +func (i *IngressGatewayConfigEntry) GetMeta() map[string]string { + return i.Meta +} + func (i *IngressGatewayConfigEntry) GetCreateIndex() uint64 { return i.CreateIndex } @@ -165,6 +173,14 @@ func (g *TerminatingGatewayConfigEntry) GetName() string { return g.Name } +func (g *TerminatingGatewayConfigEntry) GetNamespace() string { + return g.Namespace +} + +func (g *TerminatingGatewayConfigEntry) GetMeta() map[string]string { + return g.Meta +} + func (g *TerminatingGatewayConfigEntry) GetCreateIndex() uint64 { return g.CreateIndex } diff --git a/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go b/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go new file mode 100644 index 00000000000..187a4250610 --- /dev/null +++ b/vendor/github.com/hashicorp/consul/api/config_entry_intentions.go @@ -0,0 +1,80 @@ +package api + +import "time" + +type ServiceIntentionsConfigEntry struct { + Kind string + Name string + Namespace string `json:",omitempty"` + + Sources []*SourceIntention + + Meta map[string]string `json:",omitempty"` + + CreateIndex uint64 + ModifyIndex uint64 +} + +type SourceIntention struct { + Name string + Namespace string `json:",omitempty"` + Action IntentionAction `json:",omitempty"` + Permissions []*IntentionPermission `json:",omitempty"` + Precedence int + Type IntentionSourceType + Description string `json:",omitempty"` + + LegacyID string `json:",omitempty" alias:"legacy_id"` + LegacyMeta map[string]string `json:",omitempty" alias:"legacy_meta"` + LegacyCreateTime *time.Time `json:",omitempty" alias:"legacy_create_time"` + LegacyUpdateTime *time.Time `json:",omitempty" alias:"legacy_update_time"` +} + +func (e *ServiceIntentionsConfigEntry) GetKind() string { + return e.Kind +} + +func (e *ServiceIntentionsConfigEntry) GetName() string { + return e.Name +} + +func (e 
*ServiceIntentionsConfigEntry) GetNamespace() string { + return e.Namespace +} + +func (e *ServiceIntentionsConfigEntry) GetMeta() map[string]string { + return e.Meta +} + +func (e *ServiceIntentionsConfigEntry) GetCreateIndex() uint64 { + return e.CreateIndex +} + +func (e *ServiceIntentionsConfigEntry) GetModifyIndex() uint64 { + return e.ModifyIndex +} + +type IntentionPermission struct { + Action IntentionAction + HTTP *IntentionHTTPPermission `json:",omitempty"` +} + +type IntentionHTTPPermission struct { + PathExact string `json:",omitempty" alias:"path_exact"` + PathPrefix string `json:",omitempty" alias:"path_prefix"` + PathRegex string `json:",omitempty" alias:"path_regex"` + + Header []IntentionHTTPHeaderPermission `json:",omitempty"` + + Methods []string `json:",omitempty"` +} + +type IntentionHTTPHeaderPermission struct { + Name string + Present bool `json:",omitempty"` + Exact string `json:",omitempty"` + Prefix string `json:",omitempty"` + Suffix string `json:",omitempty"` + Regex string `json:",omitempty"` + Invert bool `json:",omitempty"` +} diff --git a/vendor/github.com/hashicorp/consul/api/connect_intention.go b/vendor/github.com/hashicorp/consul/api/connect_intention.go index 3db177c7b49..26fb6cc4b13 100644 --- a/vendor/github.com/hashicorp/consul/api/connect_intention.go +++ b/vendor/github.com/hashicorp/consul/api/connect_intention.go @@ -12,12 +12,12 @@ import ( // Connect. type Intention struct { // ID is the UUID-based ID for the intention, always generated by Consul. - ID string + ID string `json:",omitempty"` // Description is a human-friendly description of this intention. // It is opaque to Consul and is only stored and transferred in API // requests. - Description string + Description string `json:",omitempty"` // SourceNS, SourceName are the namespace and name, respectively, of // the source service. Either of these may be the wildcard "*", but only @@ -34,16 +34,25 @@ type Intention struct { SourceType IntentionSourceType // Action is whether this is an allowlist or denylist intention. - Action IntentionAction + Action IntentionAction `json:",omitempty"` - // DefaultAddr, DefaultPort of the local listening proxy (if any) to - // make this connection. - DefaultAddr string - DefaultPort int + // Permissions is the list of additional L7 attributes that extend the + // intention definition. + // + // NOTE: This field is not editable unless editing the underlying + // service-intentions config entry directly. + Permissions []*IntentionPermission `json:",omitempty"` + + // DefaultAddr is not used. + // Deprecated: DefaultAddr is not used and may be removed in a future version. + DefaultAddr string `json:",omitempty"` + // DefaultPort is not used. + // Deprecated: DefaultPort is not used and may be removed in a future version. + DefaultPort int `json:",omitempty"` // Meta is arbitrary metadata associated with the intention. This is // opaque to Consul but is served in API responses. - Meta map[string]string + Meta map[string]string `json:",omitempty"` // Precedence is the order that the intention will be applied, with // larger numbers being applied first. This is a read-only field, on @@ -59,7 +68,7 @@ type Intention struct { // This is needed mainly for replication purposes. 
When replicating from // one DC to another keeping the content Hash will allow us to detect // content changes more efficiently than checking every single field - Hash []byte + Hash []byte `json:",omitempty"` CreateIndex uint64 ModifyIndex uint64 @@ -67,10 +76,20 @@ type Intention struct { // String returns human-friendly output describing this intention. func (i *Intention) String() string { + var detail string + switch n := len(i.Permissions); n { + case 0: + detail = string(i.Action) + case 1: + detail = "1 permission" + default: + detail = fmt.Sprintf("%d permissions", len(i.Permissions)) + } + return fmt.Sprintf("%s => %s (%s)", i.SourceString(), i.DestinationString(), - i.Action) + detail) } // SourceString returns the namespace/name format for the source, or @@ -164,7 +183,42 @@ func (h *Connect) Intentions(q *QueryOptions) ([]*Intention, *QueryMeta, error) return out, qm, nil } +// IntentionGetExact retrieves a single intention by its unique name instead of +// its ID. +func (h *Connect) IntentionGetExact(source, destination string, q *QueryOptions) (*Intention, *QueryMeta, error) { + r := h.c.newRequest("GET", "/v1/connect/intentions/exact") + r.setQueryOptions(q) + r.params.Set("source", source) + r.params.Set("destination", destination) + rtt, resp, err := h.c.doRequest(r) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + qm := &QueryMeta{} + parseQueryMeta(resp, qm) + qm.RequestTime = rtt + + if resp.StatusCode == 404 { + return nil, qm, nil + } else if resp.StatusCode != 200 { + var buf bytes.Buffer + io.Copy(&buf, resp.Body) + return nil, nil, fmt.Errorf( + "Unexpected response %d: %s", resp.StatusCode, buf.String()) + } + + var out Intention + if err := decodeBody(resp, &out); err != nil { + return nil, nil, err + } + return &out, qm, nil +} + // IntentionGet retrieves a single intention. +// +// Deprecated: use IntentionGetExact instead func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMeta, error) { r := h.c.newRequest("GET", "/v1/connect/intentions/"+id) r.setQueryOptions(q) @@ -194,7 +248,28 @@ func (h *Connect) IntentionGet(id string, q *QueryOptions) (*Intention, *QueryMe return &out, qm, nil } +// IntentionDeleteExact deletes a single intention by its unique name instead of its ID. +func (h *Connect) IntentionDeleteExact(source, destination string, q *WriteOptions) (*WriteMeta, error) { + r := h.c.newRequest("DELETE", "/v1/connect/intentions/exact") + r.setWriteOptions(q) + r.params.Set("source", source) + r.params.Set("destination", destination) + + rtt, resp, err := requireOK(h.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + qm := &WriteMeta{} + qm.RequestTime = rtt + + return qm, nil +} + // IntentionDelete deletes a single intention. +// +// Deprecated: use IntentionDeleteExact instead func (h *Connect) IntentionDelete(id string, q *WriteOptions) (*WriteMeta, error) { r := h.c.newRequest("DELETE", "/v1/connect/intentions/"+id) r.setWriteOptions(q) @@ -268,9 +343,37 @@ func (h *Connect) IntentionCheck(args *IntentionCheck, q *QueryOptions) (bool, * return out.Allowed, qm, nil } +// IntentionUpsert will update an existing intention. The Source & Destination parameters +// in the structure must be non-empty. The ID must be empty.
+func (c *Connect) IntentionUpsert(ixn *Intention, q *WriteOptions) (*WriteMeta, error) { + r := c.c.newRequest("PUT", "/v1/connect/intentions/exact") + r.setWriteOptions(q) + r.params.Set("source", maybePrefixNamespace(ixn.SourceNS, ixn.SourceName)) + r.params.Set("destination", maybePrefixNamespace(ixn.DestinationNS, ixn.DestinationName)) + r.obj = ixn + rtt, resp, err := requireOK(c.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + wm := &WriteMeta{} + wm.RequestTime = rtt + return wm, nil +} + +func maybePrefixNamespace(ns, name string) string { + if ns == "" { + return name + } + return ns + "/" + name +} + // IntentionCreate will create a new intention. The ID in the given // structure must be empty and a generated ID will be returned on // success. +// +// Deprecated: use IntentionUpsert instead func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *WriteMeta, error) { r := c.c.newRequest("POST", "/v1/connect/intentions") r.setWriteOptions(q) @@ -293,6 +396,8 @@ func (c *Connect) IntentionCreate(ixn *Intention, q *WriteOptions) (string, *Wri // IntentionUpdate will update an existing intention. The ID in the given // structure must be non-empty. +// +// Deprecated: use IntentionUpsert instead func (c *Connect) IntentionUpdate(ixn *Intention, q *WriteOptions) (*WriteMeta, error) { r := c.c.newRequest("PUT", "/v1/connect/intentions/"+ixn.ID) r.setWriteOptions(q) diff --git a/vendor/github.com/hashicorp/consul/api/discovery_chain.go b/vendor/github.com/hashicorp/consul/api/discovery_chain.go index 75fdbaee255..f67f881c234 100644 --- a/vendor/github.com/hashicorp/consul/api/discovery_chain.go +++ b/vendor/github.com/hashicorp/consul/api/discovery_chain.go @@ -147,6 +147,9 @@ type DiscoveryGraphNode struct { // fields for Type==resolver Resolver *DiscoveryResolver + + // shared by Type==resolver || Type==splitter + LoadBalancer *LoadBalancer `json:",omitempty"` } // compiled form of ServiceRoute diff --git a/vendor/github.com/hashicorp/consul/api/go.mod b/vendor/github.com/hashicorp/consul/api/go.mod index d9902d403e9..89e6e0c94bd 100644 --- a/vendor/github.com/hashicorp/consul/api/go.mod +++ b/vendor/github.com/hashicorp/consul/api/go.mod @@ -5,12 +5,12 @@ go 1.12 replace github.com/hashicorp/consul/sdk => ../sdk require ( - github.com/hashicorp/consul/sdk v0.6.0 + github.com/hashicorp/consul/sdk v0.7.0 github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/go-hclog v0.12.0 github.com/hashicorp/go-rootcerts v1.0.2 github.com/hashicorp/go-uuid v1.0.1 - github.com/hashicorp/serf v0.9.3 + github.com/hashicorp/serf v0.9.5 github.com/mitchellh/mapstructure v1.1.2 github.com/stretchr/testify v1.4.0 ) diff --git a/vendor/github.com/hashicorp/consul/api/go.sum b/vendor/github.com/hashicorp/consul/api/go.sum index 3c26420cd23..57ef543992e 100644 --- a/vendor/github.com/hashicorp/consul/api/go.sum +++ b/vendor/github.com/hashicorp/consul/api/go.sum @@ -41,8 +41,8 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/memberlist v0.2.2 h1:5+RffWKwqJ71YPu9mWsF7ZOscZmwfasdA8kbdC7AO2g= github.com/hashicorp/memberlist v0.2.2/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/hashicorp/serf v0.9.3 h1:AVF6JDQQens6nMHT9OGERBvK0f8rPrAGILnsKLr6lzM= -github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= +github.com/hashicorp/serf v0.9.5
h1:EBWvyu9tcRszt3Bxp3KNssBMP1KuHWyO51lz9+786iM= +github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go index 5cacee8f7e5..221a7add3ca 100644 --- a/vendor/github.com/hashicorp/consul/api/lock.go +++ b/vendor/github.com/hashicorp/consul/api/lock.go @@ -79,6 +79,7 @@ type LockOptions struct { MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime LockTryOnce bool // Optional, defaults to false which means try forever + LockDelay time.Duration // Optional, defaults to 15s Namespace string `json:",omitempty"` // Optional, defaults to API client config, namespace of ACL token, or "default" namespace } @@ -351,8 +352,9 @@ func (l *Lock) createSession() (string, error) { se := l.opts.SessionOpts if se == nil { se = &SessionEntry{ - Name: l.opts.SessionName, - TTL: l.opts.SessionTTL, + Name: l.opts.SessionName, + TTL: l.opts.SessionTTL, + LockDelay: l.opts.LockDelay, } } w := WriteOptions{Namespace: l.opts.Namespace} diff --git a/vendor/github.com/hashicorp/consul/api/namespace.go b/vendor/github.com/hashicorp/consul/api/namespace.go index 875af105f90..49782d2a8c2 100644 --- a/vendor/github.com/hashicorp/consul/api/namespace.go +++ b/vendor/github.com/hashicorp/consul/api/namespace.go @@ -26,7 +26,7 @@ type Namespace struct { // DeletedAt is the time when the Namespace was marked for deletion // This is nullable so that we can omit if empty when encoding in JSON - DeletedAt *time.Time `json:"DeletedAt,omitempty"` + DeletedAt *time.Time `json:"DeletedAt,omitempty" alias:"deleted_at"` // CreateIndex is the Raft index at which the Namespace was created CreateIndex uint64 `json:"CreateIndex,omitempty"` @@ -39,10 +39,10 @@ type Namespace struct { type NamespaceACLConfig struct { // PolicyDefaults is the list of policies that should be used for the parent authorizer // of all tokens in the associated namespace. - PolicyDefaults []ACLLink `json:"PolicyDefaults"` + PolicyDefaults []ACLLink `json:"PolicyDefaults" alias:"policy_defaults"` // RoleDefaults is the list of roles that should be used for the parent authorizer // of all tokens in the associated namespace. - RoleDefaults []ACLLink `json:"RoleDefaults"` + RoleDefaults []ACLLink `json:"RoleDefaults" alias:"role_defaults"` } // Namespaces can be used to manage Namespaces in Consul Enterprise.
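The lock.go hunk above threads the new LockOptions.LockDelay field into the session created for the lock. A minimal sketch of setting it through the public consul/api client; the key name is illustrative:

```go
package main

import (
	"fmt"
	"time"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// LockDelay flows into the SessionEntry built by createSession,
	// overriding the 15s default noted above.
	lock, err := client.LockOpts(&api.LockOptions{
		Key:       "service/example/leader", // illustrative key
		LockDelay: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}

	if _, err := lock.Lock(nil); err != nil {
		panic(err)
	}
	fmt.Println("lock held")
	_ = lock.Unlock()
}
```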
diff --git a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go index 0e4ef24649f..57876ee9f68 100644 --- a/vendor/github.com/hashicorp/consul/api/operator_autopilot.go +++ b/vendor/github.com/hashicorp/consul/api/operator_autopilot.go @@ -111,6 +111,122 @@ type OperatorHealthReply struct { Servers []ServerHealth } +type AutopilotState struct { + Healthy bool + FailureTolerance int + OptimisticFailureTolerance int + + Servers map[string]AutopilotServer + Leader string + Voters []string + ReadReplicas []string `json:",omitempty"` + RedundancyZones map[string]AutopilotZone `json:",omitempty"` + Upgrade *AutopilotUpgrade `json:",omitempty"` +} + +type AutopilotServer struct { + ID string + Name string + Address string + NodeStatus string + Version string + LastContact *ReadableDuration + LastTerm uint64 + LastIndex uint64 + Healthy bool + StableSince time.Time + RedundancyZone string `json:",omitempty"` + UpgradeVersion string `json:",omitempty"` + ReadReplica bool + Status AutopilotServerStatus + Meta map[string]string + NodeType AutopilotServerType +} + +type AutopilotServerStatus string + +const ( + AutopilotServerNone AutopilotServerStatus = "none" + AutopilotServerLeader AutopilotServerStatus = "leader" + AutopilotServerVoter AutopilotServerStatus = "voter" + AutopilotServerNonVoter AutopilotServerStatus = "non-voter" + AutopilotServerStaging AutopilotServerStatus = "staging" +) + +type AutopilotServerType string + +const ( + AutopilotTypeVoter AutopilotServerType = "voter" + AutopilotTypeReadReplica AutopilotServerType = "read-replica" + AutopilotTypeZoneVoter AutopilotServerType = "zone-voter" + AutopilotTypeZoneExtraVoter AutopilotServerType = "zone-extra-voter" + AutopilotTypeZoneStandby AutopilotServerType = "zone-standby" +) + +type AutopilotZone struct { + Servers []string + Voters []string + FailureTolerance int +} + +type AutopilotZoneUpgradeVersions struct { + TargetVersionVoters []string `json:",omitempty"` + TargetVersionNonVoters []string `json:",omitempty"` + OtherVersionVoters []string `json:",omitempty"` + OtherVersionNonVoters []string `json:",omitempty"` +} + +type AutopilotUpgrade struct { + Status AutopilotUpgradeStatus + TargetVersion string `json:",omitempty"` + TargetVersionVoters []string `json:",omitempty"` + TargetVersionNonVoters []string `json:",omitempty"` + TargetVersionReadReplicas []string `json:",omitempty"` + OtherVersionVoters []string `json:",omitempty"` + OtherVersionNonVoters []string `json:",omitempty"` + OtherVersionReadReplicas []string `json:",omitempty"` + RedundancyZones map[string]AutopilotZoneUpgradeVersions `json:",omitempty"` +} + +type AutopilotUpgradeStatus string + +const ( + // AutopilotUpgradeIdle is the status when no upgrade is in progress. + AutopilotUpgradeIdle AutopilotUpgradeStatus = "idle" + + // AutopilotUpgradeAwaitNewVoters is the status when more servers of + // the target version must be added in order to start the promotion + // phase of the upgrade + AutopilotUpgradeAwaitNewVoters AutopilotUpgradeStatus = "await-new-voters" + + // AutopilotUpgradePromoting is the status when autopilot is promoting + // servers of the target version. 
+ AutopilotUpgradePromoting AutopilotUpgradeStatus = "promoting" + + // AutopilotUpgradeDemoting is the status when autopilot is demoting + // servers not on the target version + AutopilotUpgradeDemoting AutopilotUpgradeStatus = "demoting" + + // AutopilotUpgradeLeaderTransfer is the status when autopilot is transferring + // leadership from a server running an older version to a server + // using the target version. + AutopilotUpgradeLeaderTransfer AutopilotUpgradeStatus = "leader-transfer" + + // AutopilotUpgradeAwaitNewServers is the status when autopilot has finished + // transferring leadership and has demoted all the other versioned + // servers but wants to indicate that more target version servers + // are needed to replace all the existing other version servers. + AutopilotUpgradeAwaitNewServers AutopilotUpgradeStatus = "await-new-servers" + + // AutopilotUpgradeAwaitServerRemoval is the status when autopilot is waiting + // for the servers on non-target versions to be removed + AutopilotUpgradeAwaitServerRemoval AutopilotUpgradeStatus = "await-server-removal" + + // AutopilotUpgradeDisabled is the status when automated upgrades are + // disabled in the autopilot configuration + AutopilotUpgradeDisabled AutopilotUpgradeStatus = "disabled" +) + // ReadableDuration is a duration type that is serialized to JSON in human readable format. type ReadableDuration time.Duration @@ -230,3 +346,20 @@ func (op *Operator) AutopilotServerHealth(q *QueryOptions) (*OperatorHealthReply } return &out, nil } + +func (op *Operator) AutopilotState(q *QueryOptions) (*AutopilotState, error) { + r := op.c.newRequest("GET", "/v1/operator/autopilot/state") + r.setQueryOptions(q) + _, resp, err := requireOK(op.c.doRequest(r)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var out AutopilotState + if err := decodeBody(resp, &out); err != nil { + return nil, err + } + + return &out, nil +} diff --git a/vendor/github.com/hashicorp/consul/api/status.go b/vendor/github.com/hashicorp/consul/api/status.go index 74ef61a678f..57f379c7b1e 100644 --- a/vendor/github.com/hashicorp/consul/api/status.go +++ b/vendor/github.com/hashicorp/consul/api/status.go @@ -11,8 +11,13 @@ func (c *Client) Status() *Status { } // Leader is used to query for a known leader -func (s *Status) Leader() (string, error) { +func (s *Status) LeaderWithQueryOptions(q *QueryOptions) (string, error) { r := s.c.newRequest("GET", "/v1/status/leader") + + if q != nil { + r.setQueryOptions(q) + } + _, resp, err := requireOK(s.c.doRequest(r)) if err != nil { return "", err @@ -26,9 +31,18 @@ func (s *Status) Leader() (string, error) { return leader, nil } +func (s *Status) Leader() (string, error) { + return s.LeaderWithQueryOptions(nil) +} + // Peers is used to query for known raft peers -func (s *Status) Peers() ([]string, error) { +func (s *Status) PeersWithQueryOptions(q *QueryOptions) ([]string, error) { r := s.c.newRequest("GET", "/v1/status/peers") + + if q != nil { + r.setQueryOptions(q) + } + _, resp, err := requireOK(s.c.doRequest(r)) if err != nil { return nil, err @@ -41,3 +55,7 @@ func (s *Status) Peers() ([]string, error) { } return peers, nil } + +func (s *Status) Peers() ([]string, error) { + return s.PeersWithQueryOptions(nil) +} diff --git a/vendor/github.com/hashicorp/serf/coordinate/phantom.go b/vendor/github.com/hashicorp/serf/coordinate/phantom.go index 6fb033c0cd3..66da4e2e92e 100644 --- a/vendor/github.com/hashicorp/serf/coordinate/phantom.go +++ b/vendor/github.com/hashicorp/serf/coordinate/phantom.go
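The consul/api hunks above add a typed client for /v1/operator/autopilot/state and QueryOptions-aware variants of the Status endpoints. A minimal usage sketch; the "dc2" datacenter name is illustrative:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		panic(err)
	}

	// Typed view of the autopilot state endpoint added above.
	state, err := client.Operator().AutopilotState(nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("healthy=%v leader=%s voters=%v\n", state.Healthy, state.Leader, state.Voters)

	// Leader/Peers gain *WithQueryOptions variants, e.g. to target
	// another datacenter.
	leader, err := client.Status().LeaderWithQueryOptions(&api.QueryOptions{Datacenter: "dc2"})
	if err != nil {
		panic(err)
	}
	fmt.Println("leader:", leader)
}
```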
@@ -11,7 +11,7 @@ import ( // given config. func GenerateClients(nodes int, config *Config) ([]*Client, error) { clients := make([]*Client, nodes) - for i, _ := range clients { + for i := range clients { client, err := NewClient(config) if err != nil { return nil, err @@ -146,7 +146,7 @@ func Simulate(clients []*Client, truth [][]time.Duration, cycles int) { nodes := len(clients) for cycle := 0; cycle < cycles; cycle++ { - for i, _ := range clients { + for i := range clients { if j := rand.Intn(nodes); j != i { c := clients[j].GetCoordinate() rtt := truth[i][j] diff --git a/vendor/github.com/ncw/swift/.travis.yml b/vendor/github.com/ncw/swift/.travis.yml index e0a61643b0d..d43ba945974 100644 --- a/vendor/github.com/ncw/swift/.travis.yml +++ b/vendor/github.com/ncw/swift/.travis.yml @@ -13,18 +13,20 @@ go: - 1.10.x - 1.11.x - 1.12.x + - 1.13.x + - 1.14.x - master matrix: include: - - go: 1.12.x + - go: 1.14.x env: TEST_REAL_SERVER=rackspace - - go: 1.12.x + - go: 1.14.x env: TEST_REAL_SERVER=memset allow_failures: - - go: 1.12.x + - go: 1.14.x env: TEST_REAL_SERVER=rackspace - - go: 1.12.x + - go: 1.14.x env: TEST_REAL_SERVER=memset install: go test -i ./... script: diff --git a/vendor/github.com/ncw/swift/README.md b/vendor/github.com/ncw/swift/README.md index 838ec623e94..1965f73c5dd 100644 --- a/vendor/github.com/ncw/swift/README.md +++ b/vendor/github.com/ncw/swift/README.md @@ -159,3 +159,5 @@ Contributors - Bo - Thiago da Silva - Brandon WELSCH +- Damien Tournoud +- Pedro Kiefer diff --git a/vendor/github.com/ncw/swift/largeobjects.go b/vendor/github.com/ncw/swift/largeobjects.go index bec640b00e0..038bef85a9f 100644 --- a/vendor/github.com/ncw/swift/largeobjects.go +++ b/vendor/github.com/ncw/swift/largeobjects.go @@ -222,7 +222,7 @@ func (c *Connection) LargeObjectDelete(container string, objectName string) erro for i, obj := range objects { filenames[i] = obj[0] + "/" + obj[1] } - _, err = c.doBulkDelete(filenames) + _, err = c.doBulkDelete(filenames, nil) // Don't fail on ObjectNotFound because eventual consistency // makes this situation normal. if err != nil && err != Forbidden && err != ObjectNotFound { diff --git a/vendor/github.com/ncw/swift/swift.go b/vendor/github.com/ncw/swift/swift.go index 217647b9a4d..59b68ce96bb 100644 --- a/vendor/github.com/ncw/swift/swift.go +++ b/vendor/github.com/ncw/swift/swift.go @@ -964,7 +964,7 @@ func (c *Connection) ContainersAll(opts *ContainersOpts) ([]Container, error) { return containers, nil } -// ContainerNamesAll is like ContainerNamess but it returns all the Containers +// ContainerNamesAll is like ContainerNames but it returns all the Containers // // It calls ContainerNames multiple times using the Marker parameter // @@ -1372,6 +1372,13 @@ func (file *ObjectCreateFile) Write(p []byte) (n int, err error) { return } +// CloseWithError closes the object, aborting the upload. +func (file *ObjectCreateFile) CloseWithError(err error) error { + _ = file.pipeWriter.CloseWithError(err) + <-file.done + return nil +} + // Close the object and checks the md5sum if it was required. // // Also returns any other errors from the server (eg container not @@ -1902,22 +1909,26 @@ type BulkDeleteResult struct { Headers Headers // Response HTTP headers. 
} -func (c *Connection) doBulkDelete(objects []string) (result BulkDeleteResult, err error) { +func (c *Connection) doBulkDelete(objects []string, h Headers) (result BulkDeleteResult, err error) { var buffer bytes.Buffer for _, s := range objects { u := url.URL{Path: s} buffer.WriteString(u.String() + "\n") } + extraHeaders := Headers{ + "Accept": "application/json", + "Content-Type": "text/plain", + "Content-Length": strconv.Itoa(buffer.Len()), + } + for key, value := range h { + extraHeaders[key] = value + } resp, headers, err := c.storage(RequestOpts{ Operation: "DELETE", Parameters: url.Values{"bulk-delete": []string{"1"}}, - Headers: Headers{ - "Accept": "application/json", - "Content-Type": "text/plain", - "Content-Length": strconv.Itoa(buffer.Len()), - }, - ErrorMap: ContainerErrorMap, - Body: &buffer, + Headers: extraHeaders, + ErrorMap: ContainerErrorMap, + Body: &buffer, }) if err != nil { return @@ -1957,6 +1968,18 @@ func (c *Connection) doBulkDelete(objects []string) (result BulkDeleteResult, er // * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html // * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html func (c *Connection) BulkDelete(container string, objectNames []string) (result BulkDeleteResult, err error) { + return c.BulkDeleteHeaders(container, objectNames, nil) +} + +// BulkDeleteHeaders deletes multiple objectNames from container in one operation. +// +// Some servers may not accept bulk-delete requests since bulk-delete is +// an optional feature of swift - these will return the Forbidden error. +// +// See also: +// * http://docs.openstack.org/trunk/openstack-object-storage/admin/content/object-storage-bulk-delete.html +// * http://docs.rackspace.com/files/api/v1/cf-devguide/content/Bulk_Delete-d1e2338.html +func (c *Connection) BulkDeleteHeaders(container string, objectNames []string, h Headers) (result BulkDeleteResult, err error) { if len(objectNames) == 0 { result.Errors = make(map[string]error) return @@ -1965,7 +1988,7 @@ func (c *Connection) BulkDelete(container string, objectNames []string) (result for i, name := range objectNames { fullPaths[i] = fmt.Sprintf("/%s/%s", container, name) } - return c.doBulkDelete(fullPaths) + return c.doBulkDelete(fullPaths, h) } // BulkUploadResult stores results of BulkUpload(). diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go index 9b809794212..c46702d60b5 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -15,7 +15,11 @@ package prometheus import ( "errors" + "fmt" + "io/ioutil" "os" + "strconv" + "strings" ) type processCollector struct { @@ -149,3 +153,20 @@ func (c *processCollector) reportError(ch chan<- Metric, desc *Desc, err error) } ch <- NewInvalidMetric(desc, err) } + +// NewPidFileFn returns a function that retrieves a pid from the specified file. +// It is meant to be used for the PidFn field in ProcessCollectorOpts. 
+func NewPidFileFn(pidFilePath string) func() (int, error) { + return func() (int, error) { + content, err := ioutil.ReadFile(pidFilePath) + if err != nil { + return 0, fmt.Errorf("can't read pid file %q: %+v", pidFilePath, err) + } + pid, err := strconv.Atoi(strings.TrimSpace(string(content))) + if err != nil { + return 0, fmt.Errorf("can't parse pid file %q: %+v", pidFilePath, err) + } + + return pid, nil + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go index 5e1c4546ceb..d86d0cf4b0e 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/http.go @@ -99,7 +99,7 @@ func HandlerFor(reg prometheus.Gatherer, opts HandlerOpts) http.Handler { inFlightSem = make(chan struct{}, opts.MaxRequestsInFlight) } if opts.Registry != nil { - // Initialize all possibilites that can occur below. + // Initialize all possibilities that can occur below. errCnt.WithLabelValues("gathering") errCnt.WithLabelValues("encoding") if err := opts.Registry.Register(errCnt); err != nil { @@ -303,8 +303,12 @@ type Logger interface { // HandlerOpts specifies options how to serve metrics via an http.Handler. The // zero value of HandlerOpts is a reasonable default. type HandlerOpts struct { - // ErrorLog specifies an optional logger for errors collecting and - // serving metrics. If nil, errors are not logged at all. + // ErrorLog specifies an optional Logger for errors collecting and + // serving metrics. If nil, errors are not logged at all. Note that the + // type of a reported error is often prometheus.MultiError, which + // formats into a multi-line error string. If you want to avoid the + // latter, create a Logger implementation that detects a + // prometheus.MultiError and formats the contained errors into one line. ErrorLog Logger // ErrorHandling defines how errors are handled. Note that errors are // logged regardless of the configured ErrorHandling provided ErrorLog diff --git a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go index 9db24380533..ab037db8619 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go @@ -43,14 +43,14 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl // InstrumentHandlerDuration is a middleware that wraps the provided // http.Handler to observe the request duration with the provided ObserverVec. -// The ObserverVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. The Observe method of the Observer in the ObserverVec is -// called with the request duration in seconds. Partitioning happens by HTTP -// status code and/or HTTP method if the respective instance label names are -// present in the ObserverVec. For unpartitioned observations, use an -// ObserverVec with zero labels. Note that partitioning of Histograms is -// expensive and should be used judiciously. +// The ObserverVec must have valid metric and label names and must have zero, +// one, or two non-const non-curried labels. For those, the only allowed label +// names are "code" and "method". The function panics otherwise. 
The Observe +// method of the Observer in the ObserverVec is called with the request duration +// in seconds. Partitioning happens by HTTP status code and/or HTTP method if +// the respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. // // If the wrapped Handler does not set a status code, a status code of 200 is assumed. // @@ -79,12 +79,13 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht } // InstrumentHandlerCounter is a middleware that wraps the provided http.Handler -// to observe the request result with the provided CounterVec. The CounterVec -// must have zero, one, or two non-const non-curried labels. For those, the only -// allowed label names are "code" and "method". The function panics -// otherwise. Partitioning of the CounterVec happens by HTTP status code and/or -// HTTP method if the respective instance label names are present in the -// CounterVec. For unpartitioned counting, use a CounterVec with zero labels. +// to observe the request result with the provided CounterVec. The CounterVec +// must have valid metric and label names and must have zero, one, or two +// non-const non-curried labels. For those, the only allowed label names are +// "code" and "method". The function panics otherwise. Partitioning of the +// CounterVec happens by HTTP status code and/or HTTP method if the respective +// instance label names are present in the CounterVec. For unpartitioned +// counting, use a CounterVec with zero labels. // // If the wrapped Handler does not set a status code, a status code of 200 is assumed. // @@ -110,14 +111,15 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) // InstrumentHandlerTimeToWriteHeader is a middleware that wraps the provided // http.Handler to observe with the provided ObserverVec the request duration -// until the response headers are written. The ObserverVec must have zero, one, -// or two non-const non-curried labels. For those, the only allowed label names -// are "code" and "method". The function panics otherwise. The Observe method of -// the Observer in the ObserverVec is called with the request duration in -// seconds. Partitioning happens by HTTP status code and/or HTTP method if the -// respective instance label names are present in the ObserverVec. For -// unpartitioned observations, use an ObserverVec with zero labels. Note that -// partitioning of Histograms is expensive and should be used judiciously. +// until the response headers are written. The ObserverVec must have valid +// metric and label names and must have zero, one, or two non-const non-curried +// labels. For those, the only allowed label names are "code" and "method". The +// function panics otherwise. The Observe method of the Observer in the +// ObserverVec is called with the request duration in seconds. Partitioning +// happens by HTTP status code and/or HTTP method if the respective instance +// label names are present in the ObserverVec. For unpartitioned observations, +// use an ObserverVec with zero labels. Note that partitioning of Histograms is +// expensive and should be used judiciously. // // If the wrapped Handler panics before calling WriteHeader, no value is // reported. 
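The rewritten promhttp doc comments above all state the same contract: the instrument middlewares accept collectors whose non-const, non-curried labels are limited to "code" and "method", and panic otherwise. A minimal chaining sketch under that contract; the metric names and port are illustrative:

```go
package main

import (
	"fmt"
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// A counter partitioned by status code, and an unpartitioned
	// (zero-label) duration histogram, per the label rules above.
	reqCount := prometheus.NewCounterVec(
		prometheus.CounterOpts{Name: "api_requests_total", Help: "Total requests."},
		[]string{"code"},
	)
	reqDuration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{Name: "api_request_duration_seconds", Help: "Request latency."},
		nil,
	)
	prometheus.MustRegister(reqCount, reqDuration)

	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprint(w, "ok")
	})

	// Middlewares nest; each wrap panics immediately if the collector's
	// labels violate the "code"/"method" rule.
	chain := promhttp.InstrumentHandlerCounter(reqCount,
		promhttp.InstrumentHandlerDuration(reqDuration, handler))

	http.Handle("/", chain)
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}
```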
@@ -139,15 +141,15 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha } // InstrumentHandlerRequestSize is a middleware that wraps the provided -// http.Handler to observe the request size with the provided ObserverVec. The -// ObserverVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. The Observe method of the Observer in the ObserverVec is -// called with the request size in bytes. Partitioning happens by HTTP status -// code and/or HTTP method if the respective instance label names are present in -// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero -// labels. Note that partitioning of Histograms is expensive and should be used -// judiciously. +// http.Handler to observe the request size with the provided ObserverVec. The +// ObserverVec must have valid metric and label names and must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. The Observe method of +// the Observer in the ObserverVec is called with the request size in +// bytes. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. // // If the wrapped Handler does not set a status code, a status code of 200 is assumed. // @@ -174,15 +176,15 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) } // InstrumentHandlerResponseSize is a middleware that wraps the provided -// http.Handler to observe the response size with the provided ObserverVec. The -// ObserverVec must have zero, one, or two non-const non-curried labels. For -// those, the only allowed label names are "code" and "method". The function -// panics otherwise. The Observe method of the Observer in the ObserverVec is -// called with the response size in bytes. Partitioning happens by HTTP status -// code and/or HTTP method if the respective instance label names are present in -// the ObserverVec. For unpartitioned observations, use an ObserverVec with zero -// labels. Note that partitioning of Histograms is expensive and should be used -// judiciously. +// http.Handler to observe the response size with the provided ObserverVec. The +// ObserverVec must have valid metric and label names and must have zero, one, +// or two non-const non-curried labels. For those, the only allowed label names +// are "code" and "method". The function panics otherwise. The Observe method of +// the Observer in the ObserverVec is called with the response size in +// bytes. Partitioning happens by HTTP status code and/or HTTP method if the +// respective instance label names are present in the ObserverVec. For +// unpartitioned observations, use an ObserverVec with zero labels. Note that +// partitioning of Histograms is expensive and should be used judiciously. // // If the wrapped Handler does not set a status code, a status code of 200 is assumed. // @@ -198,6 +200,11 @@ func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler }) } +// checkLabels returns whether the provided Collector has a non-const, +// non-curried label named "code" and/or "method". 
It panics if the provided +// Collector does not have a Desc or has more than one Desc or its Desc is +// invalid. It also panics if the Collector has any non-const, non-curried +// labels that are not named "code" or "method". func checkLabels(c prometheus.Collector) (code bool, method bool) { // TODO(beorn7): Remove this hacky way to check for instance labels // once Descriptors can have their dimensionality queried. @@ -225,6 +232,10 @@ func checkLabels(c prometheus.Collector) (code bool, method bool) { close(descc) + // Make sure the Collector has a valid Desc by registering it with a + // temporary registry. + prometheus.NewRegistry().MustRegister(c) + // Create a ConstMetric with the Desc. Since we don't know how many // variable labels there are, try for as long as it needs. for err := errors.New("dummy"); err != nil; lvs = append(lvs, magicString) { diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go index ba94405af4c..48f5ef9d72f 100644 --- a/vendor/github.com/prometheus/client_golang/prometheus/registry.go +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -215,6 +215,8 @@ func (err AlreadyRegisteredError) Error() string { // by a Gatherer to report multiple errors during MetricFamily gathering. type MultiError []error +// Error formats the contained errors as a bullet point list, preceded by the +// total number of errors. Note that this results in a multi-line string. func (errs MultiError) Error() string { if len(errs) == 0 { return "" diff --git a/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go b/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go index d86ba945a2b..7b388ca71d4 100644 --- a/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go +++ b/vendor/github.com/prometheus/prometheus/discovery/dns/dns.go @@ -211,8 +211,11 @@ func (d *Discovery) refreshOne(ctx context.Context, name string, ch chan<- *targ target = hostPort(addr.A.String(), d.port) case *dns.AAAA: target = hostPort(addr.AAAA.String(), d.port) + case *dns.CNAME: + // CNAME responses can occur with "Type: A" dns_sd_config requests. + continue default: - level.Warn(d.logger).Log("msg", "Invalid SRV record", "record", record) + level.Warn(d.logger).Log("msg", "Invalid record", "record", record) continue } tg.Targets = append(tg.Targets, model.LabelSet{ diff --git a/vendor/github.com/prometheus/prometheus/pkg/timestamp/timestamp.go b/vendor/github.com/prometheus/prometheus/pkg/timestamp/timestamp.go index a7f03b0caf1..93458f644d1 100644 --- a/vendor/github.com/prometheus/prometheus/pkg/timestamp/timestamp.go +++ b/vendor/github.com/prometheus/prometheus/pkg/timestamp/timestamp.go @@ -13,7 +13,10 @@ package timestamp -import "time" +import ( + "math" + "time" +) // FromTime returns a new millisecond timestamp from a time. func FromTime(t time.Time) int64 { @@ -24,3 +27,8 @@ func FromTime(t time.Time) int64 { func Time(ts int64) time.Time { return time.Unix(ts/1000, (ts%1000)*int64(time.Millisecond)).UTC() } + +// FromFloatSeconds returns a millisecond timestamp from float seconds. 
+func FromFloatSeconds(ts float64) int64 { + return int64(math.Round(ts * 1000)) +} diff --git a/vendor/github.com/prometheus/prometheus/promql/engine.go b/vendor/github.com/prometheus/prometheus/promql/engine.go index 81bd70d5fbe..4486313e411 100644 --- a/vendor/github.com/prometheus/prometheus/promql/engine.go +++ b/vendor/github.com/prometheus/prometheus/promql/engine.go @@ -208,6 +208,9 @@ type EngineOpts struct { // NoStepSubqueryIntervalFn is the default evaluation interval of // a subquery in milliseconds if no step in range vector was specified `[30m:]`. NoStepSubqueryIntervalFn func(rangeMillis int64) int64 + + // EnableAtModifier if true enables @ modifier. Disabled otherwise. + EnableAtModifier bool } // Engine handles the lifetime of queries from beginning to end. @@ -222,6 +225,7 @@ type Engine struct { queryLoggerLock sync.RWMutex lookbackDelta time.Duration noStepSubqueryIntervalFn func(rangeMillis int64) int64 + enableAtModifier bool } // NewEngine returns a new engine. @@ -302,6 +306,7 @@ func NewEngine(opts EngineOpts) *Engine { activeQueryTracker: opts.ActiveQueryTracker, lookbackDelta: opts.LookbackDelta, noStepSubqueryIntervalFn: opts.NoStepSubqueryIntervalFn, + enableAtModifier: opts.EnableAtModifier, } } @@ -334,7 +339,10 @@ func (ng *Engine) NewInstantQuery(q storage.Queryable, qs string, ts time.Time) if err != nil { return nil, err } - qry := ng.newQuery(q, expr, ts, ts, 0) + qry, err := ng.newQuery(q, expr, ts, ts, 0) + if err != nil { + return nil, err + } qry.q = qs return qry, nil @@ -350,15 +358,22 @@ func (ng *Engine) NewRangeQuery(q storage.Queryable, qs string, start, end time. if expr.Type() != parser.ValueTypeVector && expr.Type() != parser.ValueTypeScalar { return nil, errors.Errorf("invalid expression type %q for range query, must be Scalar or instant Vector", parser.DocumentedType(expr.Type())) } - qry := ng.newQuery(q, expr, start, end, interval) + qry, err := ng.newQuery(q, expr, start, end, interval) + if err != nil { + return nil, err + } qry.q = qs return qry, nil } -func (ng *Engine) newQuery(q storage.Queryable, expr parser.Expr, start, end time.Time, interval time.Duration) *query { +func (ng *Engine) newQuery(q storage.Queryable, expr parser.Expr, start, end time.Time, interval time.Duration) (*query, error) { + if err := ng.validateOpts(expr); err != nil { + return nil, err + } + es := &parser.EvalStmt{ - Expr: expr, + Expr: WrapWithStepInvariantExpr(expr), Start: start, End: end, Interval: interval, @@ -369,7 +384,39 @@ func (ng *Engine) newQuery(q storage.Queryable, expr parser.Expr, start, end tim stats: stats.NewQueryTimers(), queryable: q, } - return qry + return qry, nil +} + +func (ng *Engine) validateOpts(expr parser.Expr) error { + if ng.enableAtModifier { + return nil + } + + var validationErr error + parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + switch n := node.(type) { + case *parser.VectorSelector: + if n.Timestamp != nil { + validationErr = errors.New("@ modifier is disabled") + return validationErr + } + + case *parser.MatrixSelector: + if n.VectorSelector.(*parser.VectorSelector).Timestamp != nil { + validationErr = errors.New("@ modifier is disabled") + return validationErr + } + + case *parser.SubqueryExpr: + if n.Timestamp != nil { + validationErr = errors.New("@ modifier is disabled") + return validationErr + } + } + return nil + }) + + return validationErr } func (ng *Engine) newTestQuery(f func(context.Context) error) Query { @@ -477,8 +524,8 @@ func durationMilliseconds(d time.Duration) 
int64 { // execEvalStmt evaluates the expression of an evaluation statement for the given time range. func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.EvalStmt) (parser.Value, storage.Warnings, error) { prepareSpanTimer, ctxPrepare := query.stats.GetSpanTimer(ctx, stats.QueryPreparationTime, ng.metrics.queryPrepareTime) - mint := ng.findMinTime(s) - querier, err := query.queryable.Querier(ctxPrepare, timestamp.FromTime(mint), timestamp.FromTime(s.End)) + mint, maxt := ng.findMinMaxTime(s) + querier, err := query.queryable.Querier(ctxPrepare, mint, maxt) if err != nil { prepareSpanTimer.Finish() return nil, nil, err @@ -488,6 +535,9 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval ng.populateSeries(querier, s) prepareSpanTimer.Finish() + // Modify the offset of vector and matrix selectors for the @ modifier + // w.r.t. the start time since only 1 evaluation will be done on them. + setOffsetForAtModifier(timeMilliseconds(s.Start), s.Expr) evalSpanTimer, ctxInnerEval := query.stats.GetSpanTimer(ctx, stats.InnerEvalTime, ng.metrics.queryInnerEval) // Instant evaluation. This is executed as a range evaluation with one step. if s.Start == s.End && s.Interval == 0 { @@ -576,45 +626,102 @@ func (ng *Engine) execEvalStmt(ctx context.Context, query *query, s *parser.Eval return mat, warnings, nil } -// subqueryOffsetRange returns the sum of offsets and ranges of all subqueries in the path. -func (ng *Engine) subqueryOffsetRange(path []parser.Node) (time.Duration, time.Duration) { +// subqueryTimes returns the sum of offsets and ranges of all subqueries in the path. +// If the @ modifier is used, then the offset and range is w.r.t. that timestamp +// (i.e. the sum is reset when we have @ modifier). +// The returned *int64 is the closest timestamp that was seen. nil for no @ modifier. +func subqueryTimes(path []parser.Node) (time.Duration, time.Duration, *int64) { var ( - subqOffset time.Duration - subqRange time.Duration + subqOffset, subqRange time.Duration + ts int64 = math.MaxInt64 ) for _, node := range path { switch n := node.(type) { case *parser.SubqueryExpr: - subqOffset += n.Offset + subqOffset += n.OriginalOffset subqRange += n.Range + if n.Timestamp != nil { + // The @ modifier on subquery invalidates all the offset and + // range till now. Hence resetting it here. + subqOffset = n.OriginalOffset + subqRange = n.Range + ts = *n.Timestamp + } } } - return subqOffset, subqRange + var tsp *int64 + if ts != math.MaxInt64 { + tsp = &ts + } + return subqOffset, subqRange, tsp } -func (ng *Engine) findMinTime(s *parser.EvalStmt) time.Time { - var maxOffset time.Duration +func (ng *Engine) findMinMaxTime(s *parser.EvalStmt) (int64, int64) { + var minTimestamp, maxTimestamp int64 = math.MaxInt64, math.MinInt64 + // Whenever a MatrixSelector is evaluated, evalRange is set to the corresponding range. + // The evaluation of the VectorSelector inside then evaluates the given range and unsets + // the variable. 
+ var evalRange time.Duration parser.Inspect(s.Expr, func(node parser.Node, path []parser.Node) error { - subqOffset, subqRange := ng.subqueryOffsetRange(path) switch n := node.(type) { case *parser.VectorSelector: - if maxOffset < ng.lookbackDelta+subqOffset+subqRange { - maxOffset = ng.lookbackDelta + subqOffset + subqRange + start, end := ng.getTimeRangesForSelector(s, n, path, evalRange) + if start < minTimestamp { + minTimestamp = start } - if n.Offset+ng.lookbackDelta+subqOffset+subqRange > maxOffset { - maxOffset = n.Offset + ng.lookbackDelta + subqOffset + subqRange + if end > maxTimestamp { + maxTimestamp = end } + evalRange = 0 + case *parser.MatrixSelector: - if maxOffset < n.Range+subqOffset+subqRange { - maxOffset = n.Range + subqOffset + subqRange - } - if m := n.VectorSelector.(*parser.VectorSelector).Offset + n.Range + subqOffset + subqRange; m > maxOffset { - maxOffset = m - } + evalRange = n.Range } return nil }) - return s.Start.Add(-maxOffset) + + if maxTimestamp == math.MinInt64 { + // This happens when there was no selector. Hence no time range to select. + minTimestamp = 0 + maxTimestamp = 0 + } + + return minTimestamp, maxTimestamp +} + +func (ng *Engine) getTimeRangesForSelector(s *parser.EvalStmt, n *parser.VectorSelector, path []parser.Node, evalRange time.Duration) (int64, int64) { + start, end := timestamp.FromTime(s.Start), timestamp.FromTime(s.End) + subqOffset, subqRange, subqTs := subqueryTimes(path) + + if subqTs != nil { + // The timestamp on the subquery overrides the eval statement time ranges. + start = *subqTs + end = *subqTs + } + + if n.Timestamp != nil { + // The timestamp on the selector overrides everything. + start = *n.Timestamp + end = *n.Timestamp + } else { + offsetMilliseconds := durationMilliseconds(subqOffset) + start = start - offsetMilliseconds - durationMilliseconds(subqRange) + end = end - offsetMilliseconds + } + + if evalRange == 0 { + start = start - durationMilliseconds(ng.lookbackDelta) + } else { + // For all matrix queries we want to ensure that we have (end-start) + range selected + // this way we have `range` data before the start time + start = start - durationMilliseconds(evalRange) + } + + offsetMilliseconds := durationMilliseconds(n.OriginalOffset) + start = start - offsetMilliseconds + end = end - offsetMilliseconds + + return start, end } func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { @@ -626,40 +733,18 @@ func (ng *Engine) populateSeries(querier storage.Querier, s *parser.EvalStmt) { parser.Inspect(s.Expr, func(node parser.Node, path []parser.Node) error { switch n := node.(type) { case *parser.VectorSelector: + start, end := ng.getTimeRangesForSelector(s, n, path, evalRange) hints := &storage.SelectHints{ - Start: timestamp.FromTime(s.Start), - End: timestamp.FromTime(s.End), + Start: start, + End: end, Step: durationMilliseconds(s.Interval), + Range: durationMilliseconds(evalRange), + Func: extractFuncFromPath(path), } - - // We need to make sure we select the timerange selected by the subquery. - // The subqueryOffsetRange function gives the sum of range and the - // sum of offset. - // TODO(bwplotka): Add support for better hints when subquerying. See: https://github.com/prometheus/prometheus/issues/7630. 
- subqOffset, subqRange := ng.subqueryOffsetRange(path) - offsetMilliseconds := durationMilliseconds(subqOffset) - hints.Start = hints.Start - offsetMilliseconds - durationMilliseconds(subqRange) - hints.End = hints.End - offsetMilliseconds - - if evalRange == 0 { - hints.Start = hints.Start - durationMilliseconds(ng.lookbackDelta) - } else { - hints.Range = durationMilliseconds(evalRange) - // For all matrix queries we want to ensure that we have (end-start) + range selected - // this way we have `range` data before the start time - hints.Start = hints.Start - durationMilliseconds(evalRange) - evalRange = 0 - } - - hints.Func = extractFuncFromPath(path) + evalRange = 0 hints.By, hints.Grouping = extractGroupsFromPath(path) - if n.Offset > 0 { - offsetMilliseconds := durationMilliseconds(n.Offset) - hints.Start = hints.Start - offsetMilliseconds - hints.End = hints.End - offsetMilliseconds - } - n.UnexpandedSeriesSet = querier.Select(false, hints, n.LabelMatchers...) + case *parser.MatrixSelector: evalRange = n.Range } @@ -852,7 +937,7 @@ func (enh *EvalNodeHelper) signatureFunc(on bool, names ...string) func(labels.L // the given function with the values computed for each expression at that // step. The return value is the combination into time series of all the // function call results. -func (ev *evaluator) rangeEval(f func([]parser.Value, *EvalNodeHelper) (Vector, storage.Warnings), exprs ...parser.Expr) (Matrix, storage.Warnings) { +func (ev *evaluator) rangeEval(funcCall func([]parser.Value, *EvalNodeHelper) (Vector, storage.Warnings), exprs ...parser.Expr) (Matrix, storage.Warnings) { numSteps := int((ev.endTimestamp-ev.startTimestamp)/ev.interval) + 1 matrixes := make([]Matrix, len(exprs)) origMatrixes := make([]Matrix, len(exprs)) @@ -917,7 +1002,7 @@ func (ev *evaluator) rangeEval(f func([]parser.Value, *EvalNodeHelper) (Vector, } // Make the function call. enh.Ts = ts - result, ws := f(args, enh) + result, ws := funcCall(args, enh) if result.ContainsSameLabelset() { ev.errorf("vector cannot contain metrics with the same labelset") } @@ -978,21 +1063,30 @@ func (ev *evaluator) rangeEval(f func([]parser.Value, *EvalNodeHelper) (Vector, // evalSubquery evaluates given SubqueryExpr and returns an equivalent // evaluated MatrixSelector in its place. Note that the Name and LabelMatchers are not set. -func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, storage.Warnings) { +func (ev *evaluator) evalSubquery(subq *parser.SubqueryExpr) (*parser.MatrixSelector, int, storage.Warnings) { val, ws := ev.eval(subq) mat := val.(Matrix) vs := &parser.VectorSelector{ - Offset: subq.Offset, - Series: make([]storage.Series, 0, len(mat)), + OriginalOffset: subq.OriginalOffset, + Offset: subq.Offset, + Series: make([]storage.Series, 0, len(mat)), + Timestamp: subq.Timestamp, + } + if subq.Timestamp != nil { + // The offset of subquery is not modified in case of @ modifier. + // Hence we take care of that here for the result. + vs.Offset = subq.OriginalOffset + time.Duration(ev.startTimestamp-*subq.Timestamp)*time.Millisecond } ms := &parser.MatrixSelector{ Range: subq.Range, VectorSelector: vs, } + totalSamples := 0 for _, s := range mat { + totalSamples += len(s.Points) vs.Series = append(vs.Series, NewStorageSeries(s)) } - return ms, ws + return ms, totalSamples, ws } // eval evaluates the given expression as the given AST expression node requires. 
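The rewritten time-range computation above is the heart of the @ modifier support: getTimeRangesForSelector pins both ends of the selected window to a fixed instant whenever a selector carries an @ timestamp, then shifts the window by lookback and offset. A minimal, self-contained Go sketch of that shape follows; it is not part of the patch, and rangeForSelector plus all values in it are hypothetical.

package main

import (
	"fmt"
	"time"
)

// rangeForSelector mirrors the shape of getTimeRangesForSelector above:
// an @ timestamp overrides the evaluation window, then lookback (for
// instant selectors) and offset shift the window into the past.
// All returned values are in milliseconds.
func rangeForSelector(evalStart, evalEnd int64, atTs *int64, offset, lookback time.Duration) (int64, int64) {
	start, end := evalStart, evalEnd
	if atTs != nil {
		// The timestamp on the selector overrides everything.
		start, end = *atTs, *atTs
	}
	start -= lookback.Milliseconds()
	start -= offset.Milliseconds()
	end -= offset.Milliseconds()
	return start, end
}

func main() {
	ts := int64(1609746000000) // hypothetical @ timestamp (ms)
	s, e := rangeForSelector(0, 3600000, &ts, time.Minute, 5*time.Minute)
	fmt.Println(s, e) // same window for any evalStart/evalEnd
}

Because the selected window no longer depends on the evaluation time, such subexpressions are step invariant, which is what the StepInvariantExpr machinery further below exploits.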
@@ -1007,7 +1101,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { switch e := expr.(type) { case *parser.AggregateExpr: unwrapParenExpr(&e.Param) - if s, ok := e.Param.(*parser.StringLiteral); ok { + if s, ok := unwrapStepInvariantExpr(e.Param).(*parser.StringLiteral); ok { return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) { return ev.aggregation(e.Op, e.Grouping, e.Without, s.Val, v[0].(Vector), enh), nil }, e.Expr) @@ -1022,13 +1116,13 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { case *parser.Call: call := FunctionCalls[e.Func.Name] - if e.Func.Name == "timestamp" { // Matrix evaluation always returns the evaluation time, // so this function needs special handling when given // a vector selector. unwrapParenExpr(&e.Args[0]) - vs, ok := e.Args[0].(*parser.VectorSelector) + arg := unwrapStepInvariantExpr(e.Args[0]) + vs, ok := arg.(*parser.VectorSelector) if ok { return ev.rangeEval(func(v []parser.Value, enh *EvalNodeHelper) (Vector, storage.Warnings) { val, ws := ev.vectorSelector(vs, enh.Ts) @@ -1045,7 +1139,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { ) for i := range e.Args { unwrapParenExpr(&e.Args[i]) - a := e.Args[i] + a := unwrapStepInvariantExpr(e.Args[i]) if _, ok := a.(*parser.MatrixSelector); ok { matrixArgIndex = i matrixArg = true @@ -1056,9 +1150,14 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { matrixArgIndex = i matrixArg = true // Replacing parser.SubqueryExpr with parser.MatrixSelector. - val, ws := ev.evalSubquery(subq) + val, totalSamples, ws := ev.evalSubquery(subq) e.Args[i] = val warnings = append(warnings, ws...) + defer func() { + // subquery result takes space in the memory. Get rid of that at the end. + val.VectorSelector.(*parser.VectorSelector).Series = nil + ev.currentSamples -= totalSamples + }() break } } @@ -1083,7 +1182,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { } } - sel := e.Args[matrixArgIndex].(*parser.MatrixSelector) + sel := unwrapStepInvariantExpr(e.Args[matrixArgIndex]).(*parser.MatrixSelector) selVS := sel.VectorSelector.(*parser.VectorSelector) ws, err := checkAndExpandSeriesSet(ev.ctx, sel) @@ -1146,7 +1245,7 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { it.ReduceDelta(stepRange) } if len(ss.Points) > 0 { - if ev.currentSamples < ev.maxSamples { + if ev.currentSamples+len(ss.Points) <= ev.maxSamples { mat = append(mat, ss) ev.currentSamples += len(ss.Points) } else { @@ -1266,6 +1365,9 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { return append(enh.Out, Sample{Point: Point{V: e.Val}}), nil }) + case *parser.StringLiteral: + return String{V: e.Val, T: ev.startTimestamp}, nil + case *parser.VectorSelector: ws, err := checkAndExpandSeriesSet(ev.ctx, e) if err != nil { @@ -1332,11 +1434,65 @@ func (ev *evaluator) eval(expr parser.Expr) (parser.Value, storage.Warnings) { newEv.startTimestamp += newEv.interval } + if newEv.startTimestamp != ev.startTimestamp { + // Adjust the offset of selectors based on the new + // start time of the evaluator since the calculation + // of the offset with @ happens w.r.t. the start time. 
+ setOffsetForAtModifier(newEv.startTimestamp, e.Expr) + } + res, ws := newEv.eval(e.Expr) ev.currentSamples = newEv.currentSamples return res, ws - case *parser.StringLiteral: - return String{V: e.Val, T: ev.startTimestamp}, nil + case *parser.StepInvariantExpr: + switch ce := e.Expr.(type) { + case *parser.StringLiteral, *parser.NumberLiteral: + return ev.eval(ce) + } + + newEv := &evaluator{ + startTimestamp: ev.startTimestamp, + endTimestamp: ev.startTimestamp, // Always a single evaluation. + interval: ev.interval, + ctx: ev.ctx, + currentSamples: ev.currentSamples, + maxSamples: ev.maxSamples, + logger: ev.logger, + lookbackDelta: ev.lookbackDelta, + noStepSubqueryIntervalFn: ev.noStepSubqueryIntervalFn, + } + res, ws := newEv.eval(e.Expr) + ev.currentSamples = newEv.currentSamples + switch e.Expr.(type) { + case *parser.MatrixSelector, *parser.SubqueryExpr: + // We do not duplicate results for range selectors since result is a matrix + // with their unique timestamps which does not depend on the step. + return res, ws + } + + // For every evaluation while the value remains same, the timestamp for that + // value would change for different eval times. Hence we duplicate the result + // with changed timestamps. + mat, ok := res.(Matrix) + if !ok { + panic(errors.Errorf("unexpected result in StepInvariantExpr evaluation: %T", expr)) + } + for i := range mat { + if len(mat[i].Points) != 1 { + panic(errors.Errorf("unexpected number of samples")) + } + for ts := ev.startTimestamp + ev.interval; ts <= ev.endTimestamp; ts = ts + ev.interval { + mat[i].Points = append(mat[i].Points, Point{ + T: ts, + V: mat[i].Points[0].V, + }) + ev.currentSamples++ + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } + } + } + return res, ws } panic(errors.Errorf("unhandled expression of type: %T", expr)) @@ -1359,12 +1515,13 @@ func (ev *evaluator) vectorSelector(node *parser.VectorSelector, ts int64) (Vect Metric: node.Series[i].Labels(), Point: Point{V: v, T: t}, }) + ev.currentSamples++ + if ev.currentSamples > ev.maxSamples { + ev.error(ErrTooManySamples(env)) + } } - if ev.currentSamples >= ev.maxSamples { - ev.error(ErrTooManySamples(env)) - } } return vec, ws } @@ -1497,8 +1654,8 @@ func (ev *evaluator) matrixIterSlice(it *storage.BufferedSeriesIterator, mint, m if ev.currentSamples >= ev.maxSamples { ev.error(ErrTooManySamples(env)) } - out = append(out, Point{T: t, V: v}) ev.currentSamples++ + out = append(out, Point{T: t, V: v}) } } // The seeked sample might also be in the range. @@ -2141,3 +2298,141 @@ func unwrapParenExpr(e *parser.Expr) { } } } + +func unwrapStepInvariantExpr(e parser.Expr) parser.Expr { + if p, ok := e.(*parser.StepInvariantExpr); ok { + return p.Expr + } + return e +} + +// WrapWithStepInvariantExpr wraps all possible parts of the given +// expression with StepInvariantExpr wherever valid. +func WrapWithStepInvariantExpr(expr parser.Expr) parser.Expr { + isStepInvariant := wrapWithStepInvariantExprHelper(expr) + if isStepInvariant { + return newStepInvariantExpr(expr) + } + return expr +} + +// wrapWithStepInvariantExprHelper wraps the child nodes of the expression +// with a StepInvariantExpr wherever valid. The returned boolean is true if the +// passed expression qualifies to be wrapped by StepInvariantExpr. 
+func wrapWithStepInvariantExprHelper(expr parser.Expr) bool { + switch n := expr.(type) { + case *parser.VectorSelector: + return n.Timestamp != nil + + case *parser.AggregateExpr: + return wrapWithStepInvariantExprHelper(n.Expr) + + case *parser.BinaryExpr: + isInvariant1, isInvariant2 := wrapWithStepInvariantExprHelper(n.LHS), wrapWithStepInvariantExprHelper(n.RHS) + if isInvariant1 && isInvariant2 { + return true + } + + if isInvariant1 { + n.LHS = newStepInvariantExpr(n.LHS) + } + if isInvariant2 { + n.RHS = newStepInvariantExpr(n.RHS) + } + + return false + + case *parser.Call: + _, ok := AtModifierUnsafeFunctions[n.Func.Name] + isStepInvariant := !ok + isStepInvariantSlice := make([]bool, len(n.Args)) + for i := range n.Args { + isStepInvariantSlice[i] = wrapWithStepInvariantExprHelper(n.Args[i]) + isStepInvariant = isStepInvariant && isStepInvariantSlice[i] + } + + if isStepInvariant { + + // The function and all arguments are step invariant. + return true + } + + for i, isi := range isStepInvariantSlice { + if isi { + n.Args[i] = newStepInvariantExpr(n.Args[i]) + } + } + return false + + case *parser.MatrixSelector: + return n.VectorSelector.(*parser.VectorSelector).Timestamp != nil + + case *parser.SubqueryExpr: + // Since we adjust offset for the @ modifier evaluation, + // it gets tricky to adjust it for every subquery step. + // Hence we wrap the inside of subquery irrespective of + // @ on subquery (given it is also step invariant) so that + // it is evaluated only once w.r.t. the start time of subquery. + isInvariant := wrapWithStepInvariantExprHelper(n.Expr) + if isInvariant { + n.Expr = newStepInvariantExpr(n.Expr) + } + return n.Timestamp != nil + + case *parser.ParenExpr: + return wrapWithStepInvariantExprHelper(n.Expr) + + case *parser.UnaryExpr: + return wrapWithStepInvariantExprHelper(n.Expr) + + case *parser.StringLiteral, *parser.NumberLiteral: + return true + } + + panic(fmt.Sprintf("found unexpected node %#v", expr)) +} + +func newStepInvariantExpr(expr parser.Expr) parser.Expr { + if e, ok := expr.(*parser.ParenExpr); ok { + // Wrapping the inside of () makes it easy to unwrap the paren later. + // But this effectively unwraps the paren. + return newStepInvariantExpr(e.Expr) + + } + return &parser.StepInvariantExpr{Expr: expr} +} + +// setOffsetForAtModifier modifies the offset of vector and matrix selector +// and subquery in the tree to accommodate the timestamp of @ modifier. +// The offset is adjusted w.r.t. the given evaluation time. 
+func setOffsetForAtModifier(evalTime int64, expr parser.Expr) { + getOffset := func(ts *int64, originalOffset time.Duration, path []parser.Node) time.Duration { + if ts == nil { + return originalOffset + } + + subqOffset, _, subqTs := subqueryTimes(path) + if subqTs != nil { + subqOffset += time.Duration(evalTime-*subqTs) * time.Millisecond + } + + offsetForTs := time.Duration(evalTime-*ts) * time.Millisecond + offsetDiff := offsetForTs - subqOffset + return originalOffset + offsetDiff + } + + parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + switch n := node.(type) { + case *parser.VectorSelector: + n.Offset = getOffset(n.Timestamp, n.OriginalOffset, path) + + case *parser.MatrixSelector: + vs := n.VectorSelector.(*parser.VectorSelector) + vs.Offset = getOffset(vs.Timestamp, vs.OriginalOffset, path) + + case *parser.SubqueryExpr: + n.Offset = getOffset(n.Timestamp, n.OriginalOffset, path) + } + return nil + }) +} diff --git a/vendor/github.com/prometheus/prometheus/promql/functions.go b/vendor/github.com/prometheus/prometheus/promql/functions.go index 0499e8f15c7..3a96a9ecec0 100644 --- a/vendor/github.com/prometheus/prometheus/promql/functions.go +++ b/vendor/github.com/prometheus/prometheus/promql/functions.go @@ -59,7 +59,6 @@ func funcTime(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) func extrapolatedRate(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper, isCounter bool, isRate bool) Vector { ms := args[0].(*parser.MatrixSelector) vs := ms.VectorSelector.(*parser.VectorSelector) - var ( samples = vals[0].(Matrix)[0] rangeStart = enh.Ts - durationMilliseconds(ms.Range+vs.Offset) @@ -598,7 +597,6 @@ func funcDeriv(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper func funcPredictLinear(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { samples := vals[0].(Matrix)[0] duration := vals[1].(Vector)[0].V - // No sense in trying to predict anything without at least two points. // Drop this Vector element. 
if len(samples.Points) < 2 { @@ -701,10 +699,10 @@ func funcChanges(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelp func funcLabelReplace(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { var ( vector = vals[0].(Vector) - dst = args[1].(*parser.StringLiteral).Val - repl = args[2].(*parser.StringLiteral).Val - src = args[3].(*parser.StringLiteral).Val - regexStr = args[4].(*parser.StringLiteral).Val + dst = stringFromArg(args[1]) + repl = stringFromArg(args[2]) + src = stringFromArg(args[3]) + regexStr = stringFromArg(args[4]) ) if enh.regex == nil { @@ -764,8 +762,8 @@ func funcVector(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelpe func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHelper) Vector { var ( vector = vals[0].(Vector) - dst = args[1].(*parser.StringLiteral).Val - sep = args[2].(*parser.StringLiteral).Val + dst = stringFromArg(args[1]) + sep = stringFromArg(args[2]) srcLabels = make([]string, len(args)-3) ) @@ -774,7 +772,7 @@ func funcLabelJoin(vals []parser.Value, args parser.Expressions, enh *EvalNodeHe } for i := 3; i < len(args); i++ { - src := args[i].(*parser.StringLiteral).Val + src := stringFromArg(args[i]) if !model.LabelName(src).IsValid() { panic(errors.Errorf("invalid source label name in label_join(): %s", src)) } @@ -938,6 +936,21 @@ var FunctionCalls = map[string]FunctionCall{ "year": funcYear, } +// AtModifierUnsafeFunctions are the functions whose result +// can vary if evaluation time is changed when the arguments are +// step invariant. It also includes functions that use the timestamps +// of the passed instant vector argument to calculate a result since +// that can also change with change in eval time. +var AtModifierUnsafeFunctions = map[string]struct{}{ + // Step invariant functions. + "days_in_month": {}, "day_of_month": {}, "day_of_week": {}, + "hour": {}, "minute": {}, "month": {}, "year": {}, + "predict_linear": {}, "time": {}, + // Uses timestamp of the argument for the result, + // hence unsafe to use with @ modifier. + "timestamp": {}, +} + type vectorByValueHeap Vector func (s vectorByValueHeap) Len() int { @@ -1028,3 +1041,7 @@ func createLabelsForAbsentFunction(expr parser.Expr) labels.Labels { } return m } + +func stringFromArg(e parser.Expr) string { + return unwrapStepInvariantExpr(e).(*parser.StringLiteral).Val +} diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go index de82d672546..459f1f47f55 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/ast.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/ast.go @@ -125,10 +125,17 @@ type MatrixSelector struct { // SubqueryExpr represents a subquery. type SubqueryExpr struct { - Expr Expr - Range time.Duration - Offset time.Duration - Step time.Duration + Expr Expr + Range time.Duration + // OriginalOffset is the actual offset that was set in the query. + // This never changes. + OriginalOffset time.Duration + // Offset is the offset used during the query execution + // which is calculated using the original offset, at modifier time, + // eval time, and subquery offsets in the AST tree. + Offset time.Duration + Timestamp *int64 + Step time.Duration EndPos Pos } @@ -162,10 +169,28 @@ type UnaryExpr struct { StartPos Pos } +// StepInvariantExpr represents a query which evaluates to the same result +// irrespective of the evaluation time given the raw samples from TSDB remain unchanged. 
+// Currently this is only used for engine optimisations and the parser does not produce this. +type StepInvariantExpr struct { + Expr Expr +} + +func (e *StepInvariantExpr) String() string { return e.Expr.String() } + +func (e *StepInvariantExpr) PositionRange() PositionRange { return e.Expr.PositionRange() } + // VectorSelector represents a Vector selection. type VectorSelector struct { - Name string + Name string + // OriginalOffset is the actual offset that was set in the query. + // This never changes. + OriginalOffset time.Duration + // Offset is the offset used during the query execution + // which is calculated using the original offset, at modifier time, + // eval time, and subquery offsets in the AST tree. Offset time.Duration + Timestamp *int64 LabelMatchers []*labels.Matcher // The unexpanded seriesSet populated at query preparation time. @@ -203,17 +228,19 @@ func (e *BinaryExpr) Type() ValueType { } return ValueTypeVector } +func (e *StepInvariantExpr) Type() ValueType { return e.Expr.Type() } -func (*AggregateExpr) PromQLExpr() {} -func (*BinaryExpr) PromQLExpr() {} -func (*Call) PromQLExpr() {} -func (*MatrixSelector) PromQLExpr() {} -func (*SubqueryExpr) PromQLExpr() {} -func (*NumberLiteral) PromQLExpr() {} -func (*ParenExpr) PromQLExpr() {} -func (*StringLiteral) PromQLExpr() {} -func (*UnaryExpr) PromQLExpr() {} -func (*VectorSelector) PromQLExpr() {} +func (*AggregateExpr) PromQLExpr() {} +func (*BinaryExpr) PromQLExpr() {} +func (*Call) PromQLExpr() {} +func (*MatrixSelector) PromQLExpr() {} +func (*SubqueryExpr) PromQLExpr() {} +func (*NumberLiteral) PromQLExpr() {} +func (*ParenExpr) PromQLExpr() {} +func (*StringLiteral) PromQLExpr() {} +func (*UnaryExpr) PromQLExpr() {} +func (*VectorSelector) PromQLExpr() {} +func (*StepInvariantExpr) PromQLExpr() {} // VectorMatchCardinality describes the cardinality relationship // of two Vectors in a binary operation. @@ -347,6 +374,8 @@ func Children(node Node) []Node { return []Node{n.Expr} case *MatrixSelector: return []Node{n.VectorSelector} + case *StepInvariantExpr: + return []Node{n.Expr} case *NumberLiteral, *StringLiteral, *VectorSelector: // nothing to do return []Node{} diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y index f0bdc320fc9..7964bf35f8a 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y @@ -83,6 +83,7 @@ NEQ NEQ_REGEX POW SUB +AT %token operatorsEnd // Aggregators. 
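With the AT token in place, the parser can attach a fixed evaluation timestamp to an expression via the new Timestamp field on VectorSelector and SubqueryExpr (see the ast.go hunk above). A hedged sketch of the resulting parse tree, assuming the vendored promql/parser package is importable at its upstream path and using an illustrative metric name:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// "@ <timestamp>" fixes the evaluation time of the selector.
	expr, err := parser.ParseExpr(`http_requests_total @ 1609746000`)
	if err != nil {
		panic(err)
	}
	vs := expr.(*parser.VectorSelector)
	fmt.Println(*vs.Timestamp) // stored in milliseconds
}

Note that the parser accepts @ unconditionally; rejecting it while the feature is off is left to the engine's validateOpts check added earlier in this patch.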
@@ -137,8 +138,8 @@ START_METRIC_SELECTOR %type grouping_label_list grouping_labels maybe_grouping_labels %type series_item series_values %type uint -%type number series_value signed_number -%type aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector +%type number series_value signed_number signed_or_unsigned_number +%type step_invariant_expr aggregate_expr aggregate_modifier bin_modifier binary_expr bool_modifier expr function_call function_call_args function_call_body group_modifiers label_matchers matrix_selector number_literal offset_expr on_or_ignoring paren_expr string_literal subquery_expr unary_expr vector_selector %type duration maybe_duration %start start @@ -187,6 +188,7 @@ expr : | subquery_expr | unary_expr | vector_selector + | step_invariant_expr ; /* @@ -200,8 +202,8 @@ aggregate_expr : aggregate_op aggregate_modifier function_call_body | aggregate_op function_call_body { $$ = yylex.(*parser).newAggregateExpr($1, &AggregateExpr{}, $2) } | aggregate_op error - { - yylex.(*parser).unexpected("aggregation",""); + { + yylex.(*parser).unexpected("aggregation",""); $$ = yylex.(*parser).newAggregateExpr($1, &AggregateExpr{}, Expressions{}) } ; @@ -380,6 +382,19 @@ offset_expr: expr OFFSET duration | expr OFFSET error { yylex.(*parser).unexpected("offset", "duration"); $$ = $1 } ; +/* + * @ modifiers. + */ + +step_invariant_expr: expr AT signed_or_unsigned_number + { + yylex.(*parser).setTimestamp($1, $3) + $$ = $1 + } + + | expr AT error + { yylex.(*parser).unexpected("@", "timestamp"); $$ = $1 } + ; /* * Subquery and range selectors. 
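The step_invariant_expr rule ("expr AT signed_or_unsigned_number") lets @ follow any expression, including a range selector, while the matrix_selector change in the next hunk rejects an @ placed before the range. A sketch of both placements, under the same assumptions as the previous example (the exact error text comes from the parser):

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/promql/parser"
)

func main() {
	// Valid: @ binds after the range and fixes the whole window.
	if _, err := parser.ParseExpr(`rate(http_requests_total[5m] @ 1609746000)`); err != nil {
		fmt.Println("unexpected:", err)
	}

	// Rejected by the new matrix_selector check: @ before the range.
	if _, err := parser.ParseExpr(`http_requests_total @ 1609746000[5m]`); err != nil {
		fmt.Println(err) // e.g. "no @ modifiers allowed before range"
	}
}

signed_or_unsigned_number also admits negative and fractional timestamps such as "metric @ 1609746000.123", which are reduced to millisecond precision (cf. the FromFloatSeconds helper added earlier in this patch).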
@@ -391,8 +406,10 @@ matrix_selector : expr LEFT_BRACKET duration RIGHT_BRACKET vs, ok := $1.(*VectorSelector) if !ok{ errMsg = "ranges only allowed for vector selectors" - } else if vs.Offset != 0{ + } else if vs.OriginalOffset != 0{ errMsg = "no offset modifiers allowed before range" + } else if vs.Timestamp != nil { + errMsg = "no @ modifiers allowed before range" } if errMsg != ""{ @@ -664,6 +681,8 @@ signed_number : ADD number { $$ = $2 } | SUB number { $$ = -$2 } ; +signed_or_unsigned_number: number | signed_number ; + uint : NUMBER { var err error diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go index 4117f0e7150..8d6f8b64de8 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/generated_parser.y.go @@ -3,8 +3,11 @@ //line generated_parser.y:15 package parser +import __yyfmt__ "fmt" + +//line generated_parser.y:15 + import ( - __yyfmt__ "fmt" "math" "sort" "strconv" @@ -12,7 +15,7 @@ import ( "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/pkg/value" -) //line generated_parser.y:15 +) //line generated_parser.y:28 type yySymType struct { @@ -69,37 +72,38 @@ const NEQ = 57381 const NEQ_REGEX = 57382 const POW = 57383 const SUB = 57384 -const operatorsEnd = 57385 -const aggregatorsStart = 57386 -const AVG = 57387 -const BOTTOMK = 57388 -const COUNT = 57389 -const COUNT_VALUES = 57390 -const GROUP = 57391 -const MAX = 57392 -const MIN = 57393 -const QUANTILE = 57394 -const STDDEV = 57395 -const STDVAR = 57396 -const SUM = 57397 -const TOPK = 57398 -const aggregatorsEnd = 57399 -const keywordsStart = 57400 -const BOOL = 57401 -const BY = 57402 -const GROUP_LEFT = 57403 -const GROUP_RIGHT = 57404 -const IGNORING = 57405 -const OFFSET = 57406 -const ON = 57407 -const WITHOUT = 57408 -const keywordsEnd = 57409 -const startSymbolsStart = 57410 -const START_METRIC = 57411 -const START_SERIES_DESCRIPTION = 57412 -const START_EXPRESSION = 57413 -const START_METRIC_SELECTOR = 57414 -const startSymbolsEnd = 57415 +const AT = 57385 +const operatorsEnd = 57386 +const aggregatorsStart = 57387 +const AVG = 57388 +const BOTTOMK = 57389 +const COUNT = 57390 +const COUNT_VALUES = 57391 +const GROUP = 57392 +const MAX = 57393 +const MIN = 57394 +const QUANTILE = 57395 +const STDDEV = 57396 +const STDVAR = 57397 +const SUM = 57398 +const TOPK = 57399 +const aggregatorsEnd = 57400 +const keywordsStart = 57401 +const BOOL = 57402 +const BY = 57403 +const GROUP_LEFT = 57404 +const GROUP_RIGHT = 57405 +const IGNORING = 57406 +const OFFSET = 57407 +const ON = 57408 +const WITHOUT = 57409 +const keywordsEnd = 57410 +const startSymbolsStart = 57411 +const START_METRIC = 57412 +const START_SERIES_DESCRIPTION = 57413 +const START_EXPRESSION = 57414 +const START_METRIC_SELECTOR = 57415 +const startSymbolsEnd = 57416 var yyToknames = [...]string{ "$end", @@ -144,6 +148,7 @@ var yyToknames = [...]string{ "NEQ_REGEX", "POW", "SUB", + "AT", "operatorsEnd", "aggregatorsStart", "AVG", @@ -182,7 +187,7 @@ const yyEofCode = 1 const yyErrCode = 2 const yyInitialStackSize = 16 -//line generated_parser.y:710 +//line generated_parser.y:729 //line yacctab:1 var yyExca = [...]int{ @@ -190,349 +195,357 @@ var yyExca = [...]int{ 1, -1, -2, 0, -1, 33, - 1, 121, - 10, 121, - 22, 121, + 1, 124, + 10, 124, + 22, 124, -2, 0, - -1, 55, - 2, 133, - 15, 133, - 60, 133, - 66, 133, - -2, 89, -1, 56, 
- 2, 134, - 15, 134, - 60, 134, - 66, 134, - -2, 90, - -1, 57, - 2, 135, - 15, 135, - 60, 135, - 66, 135, - -2, 92, - -1, 58, 2, 136, 15, 136, - 60, 136, - 66, 136, - -2, 93, - -1, 59, + 61, 136, + 67, 136, + -2, 92, + -1, 57, 2, 137, 15, 137, - 60, 137, - 66, 137, - -2, 94, - -1, 60, + 61, 137, + 67, 137, + -2, 93, + -1, 58, 2, 138, 15, 138, - 60, 138, - 66, 138, - -2, 99, - -1, 61, + 61, 138, + 67, 138, + -2, 95, + -1, 59, 2, 139, 15, 139, - 60, 139, - 66, 139, - -2, 101, - -1, 62, + 61, 139, + 67, 139, + -2, 96, + -1, 60, 2, 140, 15, 140, - 60, 140, - 66, 140, - -2, 103, - -1, 63, + 61, 140, + 67, 140, + -2, 97, + -1, 61, 2, 141, 15, 141, - 60, 141, - 66, 141, - -2, 104, - -1, 64, + 61, 141, + 67, 141, + -2, 102, + -1, 62, 2, 142, 15, 142, - 60, 142, - 66, 142, - -2, 105, - -1, 65, + 61, 142, + 67, 142, + -2, 104, + -1, 63, 2, 143, 15, 143, - 60, 143, - 66, 143, + 61, 143, + 67, 143, -2, 106, - -1, 66, + -1, 64, 2, 144, 15, 144, - 60, 144, - 66, 144, + 61, 144, + 67, 144, -2, 107, - -1, 176, - 12, 184, - 13, 184, - 16, 184, - 17, 184, - 23, 184, - 26, 184, - 32, 184, - 33, 184, - 36, 184, - 42, 184, - 45, 184, - 46, 184, - 47, 184, - 48, 184, - 49, 184, - 50, 184, - 51, 184, - 52, 184, - 53, 184, - 54, 184, - 55, 184, - 56, 184, - 60, 184, - 64, 184, - 66, 184, + -1, 65, + 2, 145, + 15, 145, + 61, 145, + 67, 145, + -2, 108, + -1, 66, + 2, 146, + 15, 146, + 61, 146, + 67, 146, + -2, 109, + -1, 67, + 2, 147, + 15, 147, + 61, 147, + 67, 147, + -2, 110, + -1, 182, + 12, 189, + 13, 189, + 16, 189, + 17, 189, + 23, 189, + 26, 189, + 32, 189, + 33, 189, + 36, 189, + 42, 189, + 46, 189, + 47, 189, + 48, 189, + 49, 189, + 50, 189, + 51, 189, + 52, 189, + 53, 189, + 54, 189, + 55, 189, + 56, 189, + 57, 189, + 61, 189, + 65, 189, + 67, 189, -2, 0, - -1, 177, - 12, 184, - 13, 184, - 16, 184, - 17, 184, - 23, 184, - 26, 184, - 32, 184, - 33, 184, - 36, 184, - 42, 184, - 45, 184, - 46, 184, - 47, 184, - 48, 184, - 49, 184, - 50, 184, - 51, 184, - 52, 184, - 53, 184, - 54, 184, - 55, 184, - 56, 184, - 60, 184, - 64, 184, - 66, 184, + -1, 183, + 12, 189, + 13, 189, + 16, 189, + 17, 189, + 23, 189, + 26, 189, + 32, 189, + 33, 189, + 36, 189, + 42, 189, + 46, 189, + 47, 189, + 48, 189, + 49, 189, + 50, 189, + 51, 189, + 52, 189, + 53, 189, + 54, 189, + 55, 189, + 56, 189, + 57, 189, + 61, 189, + 65, 189, + 67, 189, -2, 0, - -1, 193, - 19, 182, + -1, 201, + 19, 187, -2, 0, - -1, 241, - 19, 183, + -1, 247, + 19, 188, -2, 0, } const yyPrivate = 57344 -const yyLast = 598 +const yyLast = 629 var yyAct = [...]int{ - 247, 197, 35, 136, 237, 238, 168, 169, 108, 74, - 97, 96, 99, 174, 121, 175, 98, 250, 100, 176, - 177, 230, 95, 54, 231, 229, 171, 48, 69, 101, - 50, 22, 49, 163, 245, 148, 251, 225, 51, 244, - 116, 67, 172, 6, 248, 170, 228, 18, 19, 92, - 224, 20, 243, 103, 162, 104, 69, 68, 117, 102, - 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, - 65, 66, 94, 95, 99, 13, 101, 105, 31, 24, - 100, 30, 7, 252, 8, 79, 80, 81, 33, 82, - 83, 84, 85, 86, 87, 88, 89, 90, 91, 139, - 92, 93, 145, 78, 149, 143, 146, 141, 110, 142, - 2, 3, 4, 5, 242, 144, 32, 115, 109, 114, - 173, 138, 161, 94, 226, 178, 179, 180, 181, 182, - 183, 184, 185, 186, 187, 188, 189, 190, 191, 122, - 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, - 133, 134, 135, 153, 46, 140, 10, 137, 152, 1, - 70, 227, 138, 155, 138, 156, 71, 240, 45, 151, - 34, 95, 48, 69, 232, 50, 22, 49, 233, 234, - 235, 236, 239, 51, 80, 53, 67, 194, 9, 9, - 158, 193, 18, 19, 89, 90, 20, 241, 92, 44, - 157, 159, 68, 43, 192, 55, 56, 57, 58, 59, - 60, 61, 62, 63, 64, 65, 66, 
42, 165, 76, - 13, 94, 120, 41, 24, 167, 30, 40, 246, 75, - 170, 39, 249, 48, 69, 160, 50, 22, 49, 171, - 113, 118, 110, 147, 51, 112, 254, 67, 38, 76, - 119, 255, 109, 18, 19, 172, 111, 20, 107, 75, - 37, 36, 166, 68, 77, 73, 55, 56, 57, 58, - 59, 60, 61, 62, 63, 64, 65, 66, 199, 164, - 195, 13, 72, 52, 198, 24, 154, 30, 209, 47, - 106, 0, 215, 0, 0, 0, 253, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 211, 212, - 0, 0, 213, 0, 0, 0, 0, 0, 0, 0, - 0, 200, 202, 204, 205, 206, 214, 216, 219, 220, - 221, 222, 223, 199, 0, 201, 203, 207, 208, 210, - 217, 218, 0, 209, 0, 0, 0, 215, 0, 0, - 0, 196, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 211, 212, 0, 0, 213, 0, 0, - 0, 0, 0, 0, 0, 0, 200, 202, 204, 205, - 206, 214, 216, 219, 220, 221, 222, 223, 0, 0, - 201, 203, 207, 208, 210, 217, 218, 17, 69, 0, - 0, 22, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 18, 19, 0, - 0, 20, 17, 31, 0, 0, 22, 0, 0, 0, - 11, 12, 14, 15, 16, 21, 23, 25, 26, 27, - 28, 29, 18, 19, 0, 13, 20, 0, 0, 24, - 0, 30, 0, 0, 0, 11, 12, 14, 15, 16, - 21, 23, 25, 26, 27, 28, 29, 95, 0, 0, - 13, 0, 0, 150, 24, 0, 30, 0, 0, 79, - 80, 81, 0, 82, 83, 84, 85, 86, 87, 88, - 89, 90, 91, 0, 92, 93, 0, 0, 95, 0, + 253, 35, 205, 138, 244, 243, 144, 110, 75, 99, + 98, 180, 101, 181, 182, 183, 6, 100, 102, 123, + 55, 256, 238, 143, 237, 172, 118, 97, 49, 70, + 103, 51, 22, 50, 233, 103, 157, 257, 254, 52, + 81, 145, 68, 70, 154, 236, 171, 232, 18, 19, + 90, 91, 20, 105, 93, 106, 96, 153, 69, 104, + 31, 119, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 251, 107, 101, 13, 95, 250, + 142, 24, 102, 30, 2, 3, 4, 5, 258, 248, + 97, 239, 249, 146, 202, 145, 140, 79, 201, 7, + 32, 148, 80, 81, 146, 158, 152, 155, 150, 147, + 151, 200, 8, 90, 91, 149, 33, 93, 94, 96, + 147, 179, 140, 170, 1, 178, 184, 185, 186, 187, + 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, + 246, 95, 177, 124, 125, 126, 127, 128, 129, 130, + 131, 132, 133, 134, 135, 136, 137, 164, 139, 165, + 46, 45, 162, 10, 44, 140, 71, 161, 43, 235, + 198, 199, 97, 72, 34, 42, 49, 70, 160, 51, + 22, 50, 240, 122, 167, 241, 242, 52, 112, 245, + 68, 41, 77, 112, 166, 168, 18, 19, 111, 93, + 20, 96, 76, 111, 234, 247, 69, 40, 169, 109, + 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, + 66, 67, 117, 95, 116, 13, 39, 120, 156, 24, + 38, 30, 121, 54, 77, 252, 9, 9, 49, 70, + 255, 51, 22, 50, 76, 37, 36, 47, 174, 52, + 74, 115, 68, 260, 141, 176, 114, 261, 18, 19, + 145, 175, 20, 78, 173, 203, 73, 113, 69, 146, + 53, 206, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 207, 147, 163, 13, 48, 108, + 0, 24, 0, 30, 217, 0, 0, 0, 223, 0, + 0, 0, 259, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 219, 220, 0, 0, 221, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 208, 210, + 212, 213, 214, 222, 224, 227, 228, 229, 230, 231, + 207, 0, 209, 211, 215, 216, 218, 225, 226, 0, + 217, 0, 0, 0, 223, 0, 0, 0, 204, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 79, 80, 81, 0, 82, 83, 84, 94, 86, 87, - 88, 89, 90, 91, 0, 92, 93, 0, 0, 95, + 219, 220, 0, 0, 221, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 208, 210, 212, 213, 214, 222, + 224, 227, 228, 229, 230, 231, 0, 0, 209, 211, + 215, 216, 218, 225, 226, 17, 70, 0, 0, 22, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 79, 80, 81, 0, 82, 83, 95, 94, 86, - 87, 0, 89, 90, 91, 0, 92, 93, 0, 79, - 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 89, 90, 0, 0, 92, 93, 0, 0, 0, 94, + 0, 0, 0, 0, 0, 18, 19, 0, 0, 20, + 0, 17, 31, 0, 0, 22, 0, 0, 0, 11, + 12, 14, 15, 16, 21, 23, 25, 26, 27, 28, + 29, 18, 19, 0, 13, 20, 0, 0, 24, 0, + 30, 0, 0, 0, 0, 11, 12, 14, 15, 16, + 21, 23, 25, 26, 27, 28, 29, 97, 0, 0, + 13, 0, 0, 159, 24, 0, 30, 0, 0, 80, + 81, 82, 0, 
83, 84, 85, 86, 87, 88, 89, + 90, 91, 92, 0, 93, 94, 96, 0, 0, 0, + 0, 0, 0, 97, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 80, 81, 82, 95, 83, + 84, 85, 86, 87, 88, 89, 90, 91, 92, 0, + 93, 94, 96, 0, 0, 97, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 80, 81, 82, + 0, 83, 84, 85, 95, 87, 88, 89, 90, 91, + 92, 0, 93, 94, 96, 0, 0, 97, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 80, + 81, 82, 0, 83, 84, 0, 95, 87, 88, 0, + 90, 91, 92, 0, 93, 94, 96, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 94, + 0, 0, 0, 0, 0, 0, 0, 0, 95, } var yyPact = [...]int{ - 41, 72, 410, 410, 160, 385, -1000, -1000, -1000, 65, + 14, 89, 419, 419, 164, 393, -1000, -1000, -1000, 47, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, 247, -1000, 101, -1000, 59, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, 14, 61, -1000, - 221, -1000, 221, 43, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 240, - -1000, -1000, 238, -1000, -1000, 115, -1000, 18, -1000, -45, - -45, -45, -45, -45, -45, -45, -45, -45, -45, -45, - -45, -45, -45, -45, 155, 153, 61, -48, -1000, 100, - 100, 15, -1000, 453, 8, -1000, 151, -1000, -1000, 161, - -1000, -1000, 217, -1000, 31, -1000, 213, 221, -1000, -50, - -42, -1000, 221, 221, 221, 221, 221, 221, 221, 221, - 221, 221, 221, 221, 221, 221, -1000, -1000, -1000, 185, - -1000, -1000, -1000, -1000, 331, -1000, -1000, 30, -1000, 59, - -1000, -1000, 106, -1000, 23, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, -3, 0, -1000, -1000, -1000, - -1000, 28, 28, 157, 100, 100, 100, 100, 8, 533, - 533, 533, 515, 484, 533, 533, 515, 8, 8, 533, - 8, 157, -1000, 112, -1000, 32, -1000, -1000, -1000, -1000, + -1000, 232, -1000, 95, -1000, 499, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, 15, 20, + -1000, 226, -1000, 226, 30, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + 191, -1000, -1000, 249, -1000, -1000, 220, -1000, 4, -1000, + -41, -41, -41, -41, -41, -41, -41, -41, -41, -41, + -41, -41, -41, -41, -41, 156, 78, 113, 20, -49, + -1000, 42, 42, 16, -1000, 463, 158, -1000, 160, -1000, + -1000, 155, -1000, -1000, 190, -1000, 23, -1000, 243, 226, + -1000, -53, -48, -1000, 226, 226, 226, 226, 226, 226, + 226, 226, 226, 226, 226, 226, 226, 226, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, 24, 24, 92, -1000, + -1000, -1000, -1000, 338, -1000, -1000, 27, -1000, 499, -1000, + -1000, 186, -1000, 22, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -2, 67, -1000, -1000, -1000, 13, + 42, 42, 42, 42, 158, 76, 76, 76, 563, 531, + 76, 76, 563, 158, 158, 76, 158, 13, -1000, -1000, + -1000, 87, -1000, 72, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - -1000, -1000, -1000, -1000, -1000, 221, -1000, -1000, -1000, -1000, - 27, 27, -7, -1000, -1000, -1000, -1000, -1000, -1000, -1000, - 17, 81, -1000, -1000, 276, -1000, 59, -1000, -1000, -1000, - 27, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, + -1000, -1000, -1000, 226, -1000, -1000, -1000, -1000, 21, 21, + -3, -1000, -1000, -1000, -1000, -1000, 18, 86, -1000, -1000, + 282, -1000, 499, -1000, -1000, -1000, 21, -1000, -1000, -1000, + -1000, -1000, } var yyPgo = [...]int{ - 0, 290, 8, 289, 1, 286, 284, 185, 283, 156, - 
282, 84, 9, 280, 5, 4, 279, 264, 0, 6, - 262, 7, 261, 11, 58, 260, 250, 2, 248, 243, - 10, 241, 23, 231, 227, 223, 222, 217, 203, 199, - 168, 154, 3, 167, 159, 116, + 0, 289, 7, 288, 2, 286, 271, 233, 270, 163, + 266, 112, 8, 265, 4, 5, 264, 263, 0, 23, + 261, 6, 254, 247, 246, 10, 61, 245, 232, 1, + 230, 228, 9, 227, 20, 226, 207, 191, 183, 175, + 168, 164, 161, 160, 3, 140, 124, 100, } var yyR1 = [...]int{ - 0, 44, 44, 44, 44, 44, 44, 44, 27, 27, - 27, 27, 27, 27, 27, 27, 27, 27, 27, 22, - 22, 22, 22, 23, 23, 25, 25, 25, 25, 25, - 25, 25, 25, 25, 25, 25, 25, 25, 25, 25, - 24, 26, 26, 36, 36, 31, 31, 31, 31, 14, - 14, 14, 14, 13, 13, 13, 4, 4, 28, 30, - 30, 29, 29, 29, 37, 35, 35, 33, 39, 39, - 39, 39, 39, 40, 41, 41, 41, 32, 32, 32, - 1, 1, 1, 2, 2, 2, 2, 11, 11, 7, + 0, 46, 46, 46, 46, 46, 46, 46, 29, 29, + 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, + 24, 24, 24, 24, 25, 25, 27, 27, 27, 27, + 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, + 27, 26, 28, 28, 38, 38, 33, 33, 33, 33, + 14, 14, 14, 14, 13, 13, 13, 4, 4, 30, + 32, 32, 31, 31, 31, 39, 37, 37, 23, 23, + 35, 41, 41, 41, 41, 41, 42, 43, 43, 43, + 34, 34, 34, 1, 1, 1, 2, 2, 2, 2, + 11, 11, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, - 7, 7, 7, 7, 7, 7, 7, 7, 7, 9, - 9, 9, 9, 10, 10, 10, 12, 12, 12, 12, - 45, 17, 17, 17, 17, 16, 16, 16, 16, 16, - 20, 20, 20, 3, 3, 3, 3, 3, 3, 3, - 3, 3, 3, 3, 3, 6, 6, 6, 6, 6, + 7, 7, 9, 9, 9, 9, 10, 10, 10, 12, + 12, 12, 12, 47, 17, 17, 17, 17, 16, 16, + 16, 16, 16, 20, 20, 20, 3, 3, 3, 3, + 3, 3, 3, 3, 3, 3, 3, 3, 6, 6, + 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, - 6, 6, 6, 6, 6, 6, 6, 6, 6, 8, - 8, 5, 5, 5, 5, 34, 19, 21, 21, 18, - 42, 38, 43, 43, 15, 15, + 6, 6, 8, 8, 5, 5, 5, 5, 36, 19, + 21, 21, 22, 22, 18, 44, 40, 45, 45, 15, + 15, } var yyR2 = [...]int{ 0, 2, 2, 2, 2, 2, 2, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, - 3, 2, 2, 2, 2, 4, 4, 4, 4, 4, + 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, + 3, 3, 2, 2, 2, 2, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, - 1, 0, 1, 3, 3, 1, 1, 3, 3, 3, - 4, 2, 1, 3, 1, 2, 1, 1, 2, 3, - 2, 3, 1, 2, 3, 3, 3, 4, 6, 6, - 5, 4, 3, 2, 2, 1, 1, 3, 4, 2, - 3, 1, 2, 3, 3, 2, 1, 2, 1, 1, + 4, 1, 0, 1, 3, 3, 1, 1, 3, 3, + 3, 4, 2, 1, 3, 1, 2, 1, 1, 2, + 3, 2, 3, 1, 2, 3, 3, 3, 3, 3, + 4, 6, 6, 5, 4, 3, 2, 2, 1, 1, + 3, 4, 2, 3, 1, 2, 3, 3, 2, 1, + 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, - 4, 2, 0, 3, 1, 2, 3, 3, 2, 1, - 2, 0, 3, 2, 1, 1, 3, 1, 3, 4, + 1, 1, 3, 4, 2, 0, 3, 1, 2, 3, + 3, 2, 1, 2, 0, 3, 2, 1, 1, 3, + 1, 3, 4, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, - 1, 1, 1, 1, 1, 1, 1, 2, 2, 1, - 1, 1, 0, 1, 0, 1, + 2, 2, 1, 1, 1, 1, 1, 0, 1, 0, + 1, } var yyChk = [...]int{ - -1000, -44, 69, 70, 71, 72, 2, 10, -11, -7, - -9, 45, 46, 60, 47, 48, 49, 12, 32, 33, - 36, 50, 16, 51, 64, 52, 53, 54, 55, 56, - 66, 13, -45, -11, 10, -27, -22, -25, -28, -33, - -34, -35, -37, -38, -39, -40, -41, -3, 12, 17, - 15, 23, -8, -7, -32, 45, 46, 47, 48, 49, - 50, 51, 52, 53, 54, 55, 56, 26, 42, 13, - -41, -9, -10, 18, -12, 12, 2, -17, 2, 26, - 27, 28, 30, 31, 32, 33, 34, 35, 36, 37, - 38, 39, 41, 42, 64, 14, -23, -30, 2, 60, - 66, 15, -30, -27, -27, -32, -1, 18, -2, 12, - 2, 18, 7, 2, 4, 2, 22, -24, -31, -26, - -36, 59, -24, -24, -24, -24, -24, -24, -24, -24, - -24, -24, -24, -24, -24, -24, -42, 2, 9, -42, - 2, -30, -23, -14, 15, 2, -14, -29, 20, -27, - 20, 18, 7, 2, -5, 2, 4, 39, 29, 40, - 18, -12, 23, 2, -16, 5, -20, 12, -19, -21, - 
17, 26, 42, -27, 63, 65, 61, 62, -27, -27, - -27, -27, -27, -27, -27, -27, -27, -27, -27, -27, - -27, -27, 19, 6, 2, -13, 20, -4, -6, 2, - 45, 59, 46, 60, 47, 48, 49, 61, 62, 12, - 63, 32, 33, 36, 50, 16, 51, 64, 65, 52, - 53, 54, 55, 56, 20, 7, 18, -2, 23, 2, - 24, 24, -21, -19, -19, -14, -14, -15, -14, -15, - -43, -42, 2, 20, 7, 2, -27, -18, 17, -18, - 24, 19, 2, 20, -4, -18, + -1000, -46, 70, 71, 72, 73, 2, 10, -11, -7, + -9, 46, 47, 61, 48, 49, 50, 12, 32, 33, + 36, 51, 16, 52, 65, 53, 54, 55, 56, 57, + 67, 13, -47, -11, 10, -29, -24, -27, -30, -35, + -36, -37, -39, -40, -41, -42, -43, -23, -3, 12, + 17, 15, 23, -8, -7, -34, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 26, 42, + 13, -43, -9, -10, 18, -12, 12, 2, -17, 2, + 26, 27, 28, 30, 31, 32, 33, 34, 35, 36, + 37, 38, 39, 41, 42, 65, 43, 14, -25, -32, + 2, 61, 67, 15, -32, -29, -29, -34, -1, 18, + -2, 12, 2, 18, 7, 2, 4, 2, 22, -26, + -33, -28, -38, 60, -26, -26, -26, -26, -26, -26, + -26, -26, -26, -26, -26, -26, -26, -26, -44, 2, + 9, -22, 2, -19, -21, 17, 26, 42, -44, 2, + -32, -25, -14, 15, 2, -14, -31, 20, -29, 20, + 18, 7, 2, -5, 2, 4, 39, 29, 40, 18, + -12, 23, 2, -16, 5, -20, 12, -19, -21, -29, + 64, 66, 62, 63, -29, -29, -29, -29, -29, -29, + -29, -29, -29, -29, -29, -29, -29, -29, -19, -19, + 19, 6, 2, -13, 20, -4, -6, 2, 46, 60, + 47, 61, 48, 49, 50, 62, 63, 12, 64, 32, + 33, 36, 51, 16, 52, 65, 66, 53, 54, 55, + 56, 57, 20, 7, 18, -2, 23, 2, 24, 24, + -21, -14, -14, -15, -14, -15, -45, -44, 2, 20, + 7, 2, -29, -18, 17, -18, 24, 19, 2, 20, + -4, -18, } var yyDef = [...]int{ - 0, -2, 112, 112, 0, 0, 7, 6, 1, 112, - 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, - 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, - 108, 0, 2, -2, 3, 4, 8, 9, 10, 11, - 12, 13, 14, 15, 16, 17, 18, 0, 95, 175, - 0, 181, 0, 75, 76, -2, -2, -2, -2, -2, - -2, -2, -2, -2, -2, -2, -2, 169, 170, 0, - 5, 87, 0, 111, 114, 0, 119, 120, 124, 41, - 41, 41, 41, 41, 41, 41, 41, 41, 41, 41, - 41, 41, 41, 41, 0, 0, 0, 21, 22, 0, - 0, 0, 58, 0, 73, 74, 0, 79, 81, 0, - 86, 109, 0, 115, 0, 118, 123, 0, 40, 45, - 46, 42, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 65, 66, 180, 0, - 72, 19, 20, 23, 0, 52, 24, 0, 60, 62, - 64, 77, 0, 82, 0, 85, 171, 172, 173, 174, - 110, 113, 116, 117, 122, 125, 127, 130, 131, 132, - 176, 0, 0, 25, 0, 0, -2, -2, 26, 27, - 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, - 38, 39, 67, -2, 71, 0, 51, 54, 56, 57, - 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, - 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, - 165, 166, 167, 168, 59, 63, 78, 80, 83, 84, - 0, 0, 0, 177, 178, 43, 44, 47, 185, 48, - 0, -2, 70, 49, 0, 55, 61, 126, 179, 128, - 0, 68, 69, 50, 53, 129, + 0, -2, 115, 115, 0, 0, 7, 6, 1, 115, + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, + 111, 0, 2, -2, 3, 4, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, 18, 19, 0, 98, + 178, 0, 186, 0, 78, 79, -2, -2, -2, -2, + -2, -2, -2, -2, -2, -2, -2, -2, 172, 173, + 0, 5, 90, 0, 114, 117, 0, 122, 123, 127, + 42, 42, 42, 42, 42, 42, 42, 42, 42, 42, + 42, 42, 42, 42, 42, 0, 0, 0, 0, 22, + 23, 0, 0, 0, 59, 0, 76, 77, 0, 82, + 84, 0, 89, 112, 0, 118, 0, 121, 126, 0, + 41, 46, 47, 43, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 66, 67, + 185, 68, 69, 182, 183, 179, 0, 0, 0, 75, + 20, 21, 24, 0, 53, 25, 0, 61, 63, 65, + 80, 0, 85, 0, 88, 174, 175, 176, 177, 113, + 116, 119, 120, 125, 128, 130, 133, 134, 135, 26, + 0, 0, -2, -2, 27, 28, 29, 30, 31, 32, + 33, 34, 35, 36, 37, 38, 39, 40, 180, 181, + 70, -2, 74, 0, 52, 55, 57, 58, 148, 149, + 150, 151, 152, 
153, 154, 155, 156, 157, 158, 159, + 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, + 170, 171, 60, 64, 81, 83, 86, 87, 0, 0, + 0, 44, 45, 48, 190, 49, 0, -2, 73, 50, + 0, 56, 62, 129, 184, 131, 0, 71, 72, 51, + 54, 132, } var yyTok1 = [...]int{ @@ -547,7 +560,7 @@ var yyTok2 = [...]int{ 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, - 72, 73, + 72, 73, 74, } var yyTok3 = [...]int{ 0, @@ -892,277 +905,277 @@ yydefault: case 1: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:165 +//line generated_parser.y:166 { yylex.(*parser).generatedParserResult = yyDollar[2].labels } case 3: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:168 +//line generated_parser.y:169 { yylex.(*parser).addParseErrf(PositionRange{}, "no expression found in input") } case 4: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:170 +//line generated_parser.y:171 { yylex.(*parser).generatedParserResult = yyDollar[2].node } case 5: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:172 +//line generated_parser.y:173 { yylex.(*parser).generatedParserResult = yyDollar[2].node } case 7: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:175 +//line generated_parser.y:176 { yylex.(*parser).unexpected("", "") } - case 19: + case 20: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:197 +//line generated_parser.y:199 { yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[2].node, yyDollar[3].node) } - case 20: + case 21: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:199 +//line generated_parser.y:201 { yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, yyDollar[3].node, yyDollar[2].node) } - case 21: + case 22: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:201 +//line generated_parser.y:203 { yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, &AggregateExpr{}, yyDollar[2].node) } - case 22: + case 23: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:203 +//line generated_parser.y:205 { yylex.(*parser).unexpected("aggregation", "") yyVAL.node = yylex.(*parser).newAggregateExpr(yyDollar[1].item, &AggregateExpr{}, Expressions{}) } - case 23: + case 24: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:211 +//line generated_parser.y:213 { yyVAL.node = &AggregateExpr{ Grouping: yyDollar[2].strings, } } - case 24: + case 25: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:217 +//line generated_parser.y:219 { yyVAL.node = &AggregateExpr{ Grouping: yyDollar[2].strings, Without: true, } } - case 25: - yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:230 - { - yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) - } case 26: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:231 +//line generated_parser.y:232 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 27: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:232 +//line generated_parser.y:233 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 28: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:233 +//line generated_parser.y:234 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 29: yyDollar = yyS[yypt-4 : yypt+1] 
-//line generated_parser.y:234 +//line generated_parser.y:235 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 30: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:235 +//line generated_parser.y:236 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 31: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:236 +//line generated_parser.y:237 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 32: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:237 +//line generated_parser.y:238 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 33: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:238 +//line generated_parser.y:239 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 34: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:239 +//line generated_parser.y:240 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 35: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:240 +//line generated_parser.y:241 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 36: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:241 +//line generated_parser.y:242 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 37: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:242 +//line generated_parser.y:243 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 38: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:243 +//line generated_parser.y:244 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } case 39: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:244 +//line generated_parser.y:245 + { + yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) + } + case 40: + yyDollar = yyS[yypt-4 : yypt+1] +//line generated_parser.y:246 { yyVAL.node = yylex.(*parser).newBinaryExpression(yyDollar[1].node, yyDollar[2].item, yyDollar[3].node, yyDollar[4].node) } - case 41: + case 42: yyDollar = yyS[yypt-0 : yypt+1] -//line generated_parser.y:252 +//line generated_parser.y:254 { yyVAL.node = &BinaryExpr{ VectorMatching: &VectorMatching{Card: CardOneToOne}, } } - case 42: + case 43: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:257 +//line generated_parser.y:259 { yyVAL.node = &BinaryExpr{ VectorMatching: &VectorMatching{Card: CardOneToOne}, ReturnBool: true, } } - case 43: + case 44: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:265 +//line generated_parser.y:267 { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings } - case 44: + case 45: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:270 +//line generated_parser.y:272 { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.MatchingLabels = yyDollar[3].strings 
yyVAL.node.(*BinaryExpr).VectorMatching.On = true } - case 47: + case 48: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:280 +//line generated_parser.y:282 { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardManyToOne yyVAL.node.(*BinaryExpr).VectorMatching.Include = yyDollar[3].strings } - case 48: + case 49: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:286 +//line generated_parser.y:288 { yyVAL.node = yyDollar[1].node yyVAL.node.(*BinaryExpr).VectorMatching.Card = CardOneToMany yyVAL.node.(*BinaryExpr).VectorMatching.Include = yyDollar[3].strings } - case 49: + case 50: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:295 +//line generated_parser.y:297 { yyVAL.strings = yyDollar[2].strings } - case 50: + case 51: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:297 +//line generated_parser.y:299 { yyVAL.strings = yyDollar[2].strings } - case 51: + case 52: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:299 +//line generated_parser.y:301 { yyVAL.strings = []string{} } - case 52: + case 53: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:301 +//line generated_parser.y:303 { yylex.(*parser).unexpected("grouping opts", "\"(\"") yyVAL.strings = nil } - case 53: + case 54: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:307 +//line generated_parser.y:309 { yyVAL.strings = append(yyDollar[1].strings, yyDollar[3].item.Val) } - case 54: + case 55: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:309 +//line generated_parser.y:311 { yyVAL.strings = []string{yyDollar[1].item.Val} } - case 55: + case 56: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:311 +//line generated_parser.y:313 { yylex.(*parser).unexpected("grouping opts", "\",\" or \")\"") yyVAL.strings = yyDollar[1].strings } - case 56: + case 57: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:315 +//line generated_parser.y:317 { if !isLabel(yyDollar[1].item.Val) { yylex.(*parser).unexpected("grouping opts", "label") } yyVAL.item = yyDollar[1].item } - case 57: + case 58: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:322 +//line generated_parser.y:324 { yylex.(*parser).unexpected("grouping opts", "label") yyVAL.item = Item{} } - case 58: + case 59: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:330 +//line generated_parser.y:332 { fn, exist := getFunction(yyDollar[1].item.Val) if !exist { @@ -1177,67 +1190,83 @@ yydefault: }, } } - case 59: + case 60: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:347 +//line generated_parser.y:349 { yyVAL.node = yyDollar[2].node } - case 60: + case 61: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:349 +//line generated_parser.y:351 { yyVAL.node = Expressions{} } - case 61: + case 62: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:353 +//line generated_parser.y:355 { yyVAL.node = append(yyDollar[1].node.(Expressions), yyDollar[3].node.(Expr)) } - case 62: + case 63: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:355 +//line generated_parser.y:357 { yyVAL.node = Expressions{yyDollar[1].node.(Expr)} } - case 63: + case 64: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:357 +//line generated_parser.y:359 { yylex.(*parser).addParseErrf(yyDollar[2].item.PositionRange(), "trailing commas not allowed in function call args") yyVAL.node = yyDollar[1].node } - case 64: + case 65: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:368 +//line generated_parser.y:370 { yyVAL.node = 
&ParenExpr{Expr: yyDollar[2].node.(Expr), PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item)} } - case 65: + case 66: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:376 +//line generated_parser.y:378 { yylex.(*parser).addOffset(yyDollar[1].node, yyDollar[3].duration) yyVAL.node = yyDollar[1].node } - case 66: + case 67: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:381 +//line generated_parser.y:383 { yylex.(*parser).unexpected("offset", "duration") yyVAL.node = yyDollar[1].node } - case 67: + case 68: + yyDollar = yyS[yypt-3 : yypt+1] +//line generated_parser.y:390 + { + yylex.(*parser).setTimestamp(yyDollar[1].node, yyDollar[3].float) + yyVAL.node = yyDollar[1].node + } + case 69: + yyDollar = yyS[yypt-3 : yypt+1] +//line generated_parser.y:396 + { + yylex.(*parser).unexpected("@", "timestamp") + yyVAL.node = yyDollar[1].node + } + case 70: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:389 +//line generated_parser.y:404 { var errMsg string vs, ok := yyDollar[1].node.(*VectorSelector) if !ok { errMsg = "ranges only allowed for vector selectors" - } else if vs.Offset != 0 { + } else if vs.OriginalOffset != 0 { errMsg = "no offset modifiers allowed before range" + } else if vs.Timestamp != nil { + errMsg = "no @ modifiers allowed before range" } if errMsg != "" { @@ -1251,9 +1280,9 @@ yydefault: EndPos: yylex.(*parser).lastClosing, } } - case 68: + case 71: yyDollar = yyS[yypt-6 : yypt+1] -//line generated_parser.y:412 +//line generated_parser.y:429 { yyVAL.node = &SubqueryExpr{ Expr: yyDollar[1].node.(Expr), @@ -1263,37 +1292,37 @@ yydefault: EndPos: yyDollar[6].item.Pos + 1, } } - case 69: + case 72: yyDollar = yyS[yypt-6 : yypt+1] -//line generated_parser.y:422 +//line generated_parser.y:439 { yylex.(*parser).unexpected("subquery selector", "\"]\"") yyVAL.node = yyDollar[1].node } - case 70: + case 73: yyDollar = yyS[yypt-5 : yypt+1] -//line generated_parser.y:424 +//line generated_parser.y:441 { yylex.(*parser).unexpected("subquery selector", "duration or \"]\"") yyVAL.node = yyDollar[1].node } - case 71: + case 74: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:426 +//line generated_parser.y:443 { yylex.(*parser).unexpected("subquery or range", "\":\" or \"]\"") yyVAL.node = yyDollar[1].node } - case 72: + case 75: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:428 +//line generated_parser.y:445 { yylex.(*parser).unexpected("subquery selector", "duration") yyVAL.node = yyDollar[1].node } - case 73: + case 76: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:438 +//line generated_parser.y:455 { if nl, ok := yyDollar[2].node.(*NumberLiteral); ok { if yyDollar[1].item.Typ == SUB { @@ -1305,9 +1334,9 @@ yydefault: yyVAL.node = &UnaryExpr{Op: yyDollar[1].item.Typ, Expr: yyDollar[2].node.(Expr), StartPos: yyDollar[1].item.Pos} } } - case 74: + case 77: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:456 +//line generated_parser.y:473 { vs := yyDollar[2].node.(*VectorSelector) vs.PosRange = mergeRanges(&yyDollar[1].item, vs) @@ -1315,9 +1344,9 @@ yydefault: yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 75: + case 78: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:464 +//line generated_parser.y:481 { vs := &VectorSelector{ Name: yyDollar[1].item.Val, @@ -1327,44 +1356,44 @@ yydefault: yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 76: + case 79: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:474 +//line generated_parser.y:491 { vs := 
yyDollar[1].node.(*VectorSelector) yylex.(*parser).assembleVectorSelector(vs) yyVAL.node = vs } - case 77: + case 80: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:482 +//line generated_parser.y:499 { yyVAL.node = &VectorSelector{ LabelMatchers: yyDollar[2].matchers, PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[3].item), } } - case 78: + case 81: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:489 +//line generated_parser.y:506 { yyVAL.node = &VectorSelector{ LabelMatchers: yyDollar[2].matchers, PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[4].item), } } - case 79: + case 82: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:496 +//line generated_parser.y:513 { yyVAL.node = &VectorSelector{ LabelMatchers: []*labels.Matcher{}, PosRange: mergeRanges(&yyDollar[1].item, &yyDollar[2].item), } } - case 80: + case 83: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:505 +//line generated_parser.y:522 { if yyDollar[1].matchers != nil { yyVAL.matchers = append(yyDollar[1].matchers, yyDollar[3].matcher) @@ -1372,196 +1401,196 @@ yydefault: yyVAL.matchers = yyDollar[1].matchers } } - case 81: + case 84: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:513 +//line generated_parser.y:530 { yyVAL.matchers = []*labels.Matcher{yyDollar[1].matcher} } - case 82: + case 85: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:515 +//line generated_parser.y:532 { yylex.(*parser).unexpected("label matching", "\",\" or \"}\"") yyVAL.matchers = yyDollar[1].matchers } - case 83: + case 86: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:519 +//line generated_parser.y:536 { yyVAL.matcher = yylex.(*parser).newLabelMatcher(yyDollar[1].item, yyDollar[2].item, yyDollar[3].item) } - case 84: + case 87: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:521 +//line generated_parser.y:538 { yylex.(*parser).unexpected("label matching", "string") yyVAL.matcher = nil } - case 85: + case 88: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:523 +//line generated_parser.y:540 { yylex.(*parser).unexpected("label matching", "label matching operator") yyVAL.matcher = nil } - case 86: + case 89: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:525 +//line generated_parser.y:542 { yylex.(*parser).unexpected("label matching", "identifier or \"}\"") yyVAL.matcher = nil } - case 87: + case 90: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:533 +//line generated_parser.y:550 { yyVAL.labels = append(yyDollar[2].labels, labels.Label{Name: labels.MetricName, Value: yyDollar[1].item.Val}) sort.Sort(yyVAL.labels) } - case 88: + case 91: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:535 +//line generated_parser.y:552 { yyVAL.labels = yyDollar[1].labels } - case 109: + case 112: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:542 +//line generated_parser.y:559 { yyVAL.labels = labels.New(yyDollar[2].labels...) } - case 110: + case 113: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:544 +//line generated_parser.y:561 { yyVAL.labels = labels.New(yyDollar[2].labels...) 
} - case 111: + case 114: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:546 +//line generated_parser.y:563 { yyVAL.labels = labels.New() } - case 112: + case 115: yyDollar = yyS[yypt-0 : yypt+1] -//line generated_parser.y:548 +//line generated_parser.y:565 { yyVAL.labels = labels.New() } - case 113: + case 116: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:552 +//line generated_parser.y:569 { yyVAL.labels = append(yyDollar[1].labels, yyDollar[3].label) } - case 114: + case 117: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:554 +//line generated_parser.y:571 { yyVAL.labels = []labels.Label{yyDollar[1].label} } - case 115: + case 118: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:556 +//line generated_parser.y:573 { yylex.(*parser).unexpected("label set", "\",\" or \"}\"") yyVAL.labels = yyDollar[1].labels } - case 116: + case 119: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:561 +//line generated_parser.y:578 { yyVAL.label = labels.Label{Name: yyDollar[1].item.Val, Value: yylex.(*parser).unquoteString(yyDollar[3].item.Val)} } - case 117: + case 120: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:563 +//line generated_parser.y:580 { yylex.(*parser).unexpected("label set", "string") yyVAL.label = labels.Label{} } - case 118: + case 121: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:565 +//line generated_parser.y:582 { yylex.(*parser).unexpected("label set", "\"=\"") yyVAL.label = labels.Label{} } - case 119: + case 122: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:567 +//line generated_parser.y:584 { yylex.(*parser).unexpected("label set", "identifier or \"}\"") yyVAL.label = labels.Label{} } - case 120: + case 123: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:575 +//line generated_parser.y:592 { yylex.(*parser).generatedParserResult = &seriesDescription{ labels: yyDollar[1].labels, values: yyDollar[2].series, } } - case 121: + case 124: yyDollar = yyS[yypt-0 : yypt+1] -//line generated_parser.y:584 +//line generated_parser.y:601 { yyVAL.series = []SequenceValue{} } - case 122: + case 125: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:586 +//line generated_parser.y:603 { yyVAL.series = append(yyDollar[1].series, yyDollar[3].series...) 
} - case 123: + case 126: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:588 +//line generated_parser.y:605 { yyVAL.series = yyDollar[1].series } - case 124: + case 127: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:590 +//line generated_parser.y:607 { yylex.(*parser).unexpected("series values", "") yyVAL.series = nil } - case 125: + case 128: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:594 +//line generated_parser.y:611 { yyVAL.series = []SequenceValue{{Omitted: true}} } - case 126: + case 129: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:596 +//line generated_parser.y:613 { yyVAL.series = []SequenceValue{} for i := uint64(0); i < yyDollar[3].uint; i++ { yyVAL.series = append(yyVAL.series, SequenceValue{Omitted: true}) } } - case 127: + case 130: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:603 +//line generated_parser.y:620 { yyVAL.series = []SequenceValue{{Value: yyDollar[1].float}} } - case 128: + case 131: yyDollar = yyS[yypt-3 : yypt+1] -//line generated_parser.y:605 +//line generated_parser.y:622 { yyVAL.series = []SequenceValue{} for i := uint64(0); i <= yyDollar[3].uint; i++ { yyVAL.series = append(yyVAL.series, SequenceValue{Value: yyDollar[1].float}) } } - case 129: + case 132: yyDollar = yyS[yypt-4 : yypt+1] -//line generated_parser.y:612 +//line generated_parser.y:629 { yyVAL.series = []SequenceValue{} for i := uint64(0); i <= yyDollar[4].uint; i++ { @@ -1569,45 +1598,45 @@ yydefault: yyDollar[1].float += yyDollar[2].float } } - case 130: + case 133: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:622 +//line generated_parser.y:639 { if yyDollar[1].item.Val != "stale" { yylex.(*parser).unexpected("series values", "number or \"stale\"") } yyVAL.float = math.Float64frombits(value.StaleNaN) } - case 175: + case 178: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:653 +//line generated_parser.y:670 { yyVAL.node = &NumberLiteral{ Val: yylex.(*parser).number(yyDollar[1].item.Val), PosRange: yyDollar[1].item.PositionRange(), } } - case 176: + case 179: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:661 +//line generated_parser.y:678 { yyVAL.float = yylex.(*parser).number(yyDollar[1].item.Val) } - case 177: + case 180: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:663 +//line generated_parser.y:680 { yyVAL.float = yyDollar[2].float } - case 178: + case 181: yyDollar = yyS[yypt-2 : yypt+1] -//line generated_parser.y:664 +//line generated_parser.y:681 { yyVAL.float = -yyDollar[2].float } - case 179: + case 184: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:668 +//line generated_parser.y:687 { var err error yyVAL.uint, err = strconv.ParseUint(yyDollar[1].item.Val, 10, 64) @@ -1615,9 +1644,9 @@ yydefault: yylex.(*parser).addParseErrf(yyDollar[1].item.PositionRange(), "invalid repetition in series values: %s", err) } } - case 180: + case 185: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:678 +//line generated_parser.y:697 { var err error yyVAL.duration, err = parseDuration(yyDollar[1].item.Val) @@ -1625,24 +1654,24 @@ yydefault: yylex.(*parser).addParseErr(yyDollar[1].item.PositionRange(), err) } } - case 181: + case 186: yyDollar = yyS[yypt-1 : yypt+1] -//line generated_parser.y:689 +//line generated_parser.y:708 { yyVAL.node = &StringLiteral{ Val: yylex.(*parser).unquoteString(yyDollar[1].item.Val), PosRange: yyDollar[1].item.PositionRange(), } } - case 182: + case 187: yyDollar = yyS[yypt-0 : yypt+1] -//line generated_parser.y:702 +//line 
generated_parser.y:721 { yyVAL.duration = 0 } - case 184: + case 189: yyDollar = yyS[yypt-0 : yypt+1] -//line generated_parser.y:706 +//line generated_parser.y:725 { yyVAL.strings = nil } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go index ada5d70d161..11ce35c54c7 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/lex.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/lex.go @@ -440,7 +440,8 @@ func lexStatements(l *Lexer) stateFn { } l.emit(RIGHT_BRACKET) l.bracketOpen = false - + case r == '@': + l.emit(AT) default: return l.errorf("unexpected character: %q", r) } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go index 99879445d8c..2960b14b28d 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/parse.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/parse.go @@ -15,6 +15,7 @@ package parser import ( "fmt" + "math" "os" "runtime" "strconv" @@ -26,6 +27,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/timestamp" "github.com/prometheus/prometheus/util/strutil" ) @@ -318,7 +320,7 @@ func (p *parser) Lex(lval *yySymType) int { case EOF: lval.item.Typ = EOF p.InjectItem(0) - case RIGHT_BRACE, RIGHT_PAREN, RIGHT_BRACKET, DURATION: + case RIGHT_BRACE, RIGHT_PAREN, RIGHT_BRACKET, DURATION, NUMBER: p.lastClosing = lval.item.Pos + Pos(len(lval.item.Val)) } @@ -583,6 +585,21 @@ func (p *parser) checkAST(node Node) (typ ValueType) { p.checkAST(n.VectorSelector) case *VectorSelector: + if n.Name != "" { + // In this case the last LabelMatcher is checking for the metric name + // set outside the braces. This checks if the name has already been set + // previously. + for _, m := range n.LabelMatchers[0 : len(n.LabelMatchers)-1] { + if m != nil && m.Name == labels.MetricName { + p.addParseErrf(n.PositionRange(), "metric name must not be set twice: %q or %q", n.Name, m.Value) + } + } + + // Skip the check for non-empty matchers because an explicit + // metric name is a non-empty matcher. + break + } + // A Vector selector must contain at least one non-empty matcher to prevent // implicit selection of all metrics (e.g. by a typo). notEmpty := false @@ -596,17 +613,6 @@ func (p *parser) checkAST(node Node) (typ ValueType) { p.addParseErrf(n.PositionRange(), "vector selector must contain at least one non-empty matcher") } - if n.Name != "" { - // In this case the last LabelMatcher is checking for the metric name - // set outside the braces. This checks if the name has already been set - // previously - for _, m := range n.LabelMatchers[0 : len(n.LabelMatchers)-1] { - if m != nil && m.Name == labels.MetricName { - p.addParseErrf(n.PositionRange(), "metric name must not be set twice: %q or %q", n.Name, m.Value) - } - } - } - case *NumberLiteral, *StringLiteral: // Nothing to do for terminals. @@ -676,34 +682,92 @@ func (p *parser) newLabelMatcher(label Item, operator Item, value Item) *labels. return m } +// addOffset is used to set the offset in the generated parser. 
func (p *parser) addOffset(e Node, offset time.Duration) { - var offsetp *time.Duration + var orgoffsetp *time.Duration var endPosp *Pos switch s := e.(type) { case *VectorSelector: - offsetp = &s.Offset + orgoffsetp = &s.OriginalOffset endPosp = &s.PosRange.End case *MatrixSelector: - if vs, ok := s.VectorSelector.(*VectorSelector); ok { - offsetp = &vs.Offset + vs, ok := s.VectorSelector.(*VectorSelector) + if !ok { + p.addParseErrf(e.PositionRange(), "ranges only allowed for vector selectors") + return } + orgoffsetp = &vs.OriginalOffset endPosp = &s.EndPos case *SubqueryExpr: - offsetp = &s.Offset + orgoffsetp = &s.OriginalOffset endPosp = &s.EndPos default: - p.addParseErrf(e.PositionRange(), "offset modifier must be preceded by an instant or range selector, but follows a %T instead", e) + p.addParseErrf(e.PositionRange(), "offset modifier must be preceded by an instant vector selector or range vector selector or a subquery") return } // It is already ensured by the parseDuration func that there will never be a zero offset modifier - if *offsetp != 0 { + if *orgoffsetp != 0 { p.addParseErrf(e.PositionRange(), "offset may not be set multiple times") - } else if offsetp != nil { - *offsetp = offset + } else if orgoffsetp != nil { + *orgoffsetp = offset + } + + *endPosp = p.lastClosing +} + +// setTimestamp is used to set the timestamp from the @ modifier in the generated parser. +func (p *parser) setTimestamp(e Node, ts float64) { + if math.IsInf(ts, -1) || math.IsInf(ts, 1) || math.IsNaN(ts) || + ts >= float64(math.MaxInt64) || ts <= float64(math.MinInt64) { + p.addParseErrf(e.PositionRange(), "timestamp out of bounds for @ modifier: %f", ts) + } + var timestampp **int64 + var endPosp *Pos + + switch s := e.(type) { + case *VectorSelector: + timestampp = &s.Timestamp + endPosp = &s.PosRange.End + case *MatrixSelector: + vs, ok := s.VectorSelector.(*VectorSelector) + if !ok { + p.addParseErrf(e.PositionRange(), "ranges only allowed for vector selectors") + return + } + timestampp = &vs.Timestamp + endPosp = &s.EndPos + case *SubqueryExpr: + timestampp = &s.Timestamp + endPosp = &s.EndPos + default: + p.addParseErrf(e.PositionRange(), "@ modifier must be preceded by an instant vector selector or range vector selector or a subquery") + return + } + + if *timestampp != nil { + p.addParseErrf(e.PositionRange(), "@ may not be set multiple times") + } else if timestampp != nil { + *timestampp = new(int64) + **timestampp = timestamp.FromFloatSeconds(ts) } *endPosp = p.lastClosing +} +func MustLabelMatcher(mt labels.MatchType, name, val string) *labels.Matcher { + m, err := labels.NewMatcher(mt, name, val) + if err != nil { + panic(err) + } + return m +} + +func MustGetFunction(name string) *Function { + f, ok := getFunction(name) + if !ok { + panic(errors.Errorf("function %q does not exist", name)) + } + return f } diff --git a/vendor/github.com/prometheus/prometheus/promql/parser/printer.go b/vendor/github.com/prometheus/prometheus/promql/parser/printer.go index eef4aa8e418..ba18a3db105 100644 --- a/vendor/github.com/prometheus/prometheus/promql/parser/printer.go +++ b/vendor/github.com/prometheus/prometheus/promql/parser/printer.go @@ -116,14 +116,24 @@ func (node *MatrixSelector) String() string { // Copy the Vector selector before changing the offset vecSelector := *node.VectorSelector.(*VectorSelector) offset := "" - if vecSelector.Offset != time.Duration(0) { - offset = fmt.Sprintf(" offset %s", model.Duration(vecSelector.Offset)) + if vecSelector.OriginalOffset != time.Duration(0) { + 
offset = fmt.Sprintf(" offset %s", model.Duration(vecSelector.OriginalOffset)) } + at := "" + if vecSelector.Timestamp != nil { + at = fmt.Sprintf(" @ %.3f", float64(*vecSelector.Timestamp)/1000.0) + } + + // Do not print the @ and offset twice. + offsetVal, atVal := vecSelector.OriginalOffset, vecSelector.Timestamp + vecSelector.OriginalOffset = 0 + vecSelector.Timestamp = nil + + str := fmt.Sprintf("%s[%s]%s%s", vecSelector.String(), model.Duration(node.Range), at, offset) - // Do not print the offset twice. - vecSelector.Offset = 0 + vecSelector.OriginalOffset, vecSelector.Timestamp = offsetVal, atVal - return fmt.Sprintf("%s[%s]%s", vecSelector.String(), model.Duration(node.Range), offset) + return str } func (node *SubqueryExpr) String() string { @@ -132,10 +142,14 @@ func (node *SubqueryExpr) String() string { step = model.Duration(node.Step).String() } offset := "" - if node.Offset != time.Duration(0) { - offset = fmt.Sprintf(" offset %s", model.Duration(node.Offset)) + if node.OriginalOffset != time.Duration(0) { + offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset)) } - return fmt.Sprintf("%s[%s:%s]%s", node.Expr.String(), model.Duration(node.Range), step, offset) + at := "" + if node.Timestamp != nil { + at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0) + } + return fmt.Sprintf("%s[%s:%s]%s%s", node.Expr.String(), model.Duration(node.Range), step, at, offset) } func (node *NumberLiteral) String() string { @@ -164,13 +178,17 @@ func (node *VectorSelector) String() string { labelStrings = append(labelStrings, matcher.String()) } offset := "" - if node.Offset != time.Duration(0) { - offset = fmt.Sprintf(" offset %s", model.Duration(node.Offset)) + if node.OriginalOffset != time.Duration(0) { + offset = fmt.Sprintf(" offset %s", model.Duration(node.OriginalOffset)) + } + at := "" + if node.Timestamp != nil { + at = fmt.Sprintf(" @ %.3f", float64(*node.Timestamp)/1000.0) } if len(labelStrings) == 0 { - return fmt.Sprintf("%s%s", node.Name, offset) + return fmt.Sprintf("%s%s%s", node.Name, at, offset) } sort.Strings(labelStrings) - return fmt.Sprintf("%s{%s}%s", node.Name, strings.Join(labelStrings, ","), offset) + return fmt.Sprintf("%s{%s}%s%s", node.Name, strings.Join(labelStrings, ","), at, offset) } diff --git a/vendor/github.com/prometheus/prometheus/promql/test.go b/vendor/github.com/prometheus/prometheus/promql/test.go index 7992c5b23a8..d02f2631756 100644 --- a/vendor/github.com/prometheus/prometheus/promql/test.go +++ b/vendor/github.com/prometheus/prometheus/promql/test.go @@ -27,6 +27,7 @@ import ( "github.com/prometheus/common/model" "github.com/prometheus/prometheus/pkg/labels" + "github.com/prometheus/prometheus/pkg/timestamp" "github.com/prometheus/prometheus/promql/parser" "github.com/prometheus/prometheus/storage" "github.com/prometheus/prometheus/tsdb" @@ -427,6 +428,74 @@ func (t *Test) Run() error { return nil } +type atModifierTestCase struct { + expr string + evalTime time.Time +} + +func atModifierTestCases(exprStr string, evalTime time.Time) ([]atModifierTestCase, error) { + expr, err := parser.ParseExpr(exprStr) + if err != nil { + return nil, err + } + ts := timestamp.FromTime(evalTime) + + containsNonStepInvariant := false + // Setting the @ timestamp for all selectors to be evalTime. + // If there is a subquery, then the selectors inside it don't get the @ timestamp. + // If any selector already has the @ timestamp set, then it is untouched. 
+ parser.Inspect(expr, func(node parser.Node, path []parser.Node) error { + _, _, subqTs := subqueryTimes(path) + if subqTs != nil { + // There is a subquery with timestamp in the path, + // hence don't change any timestamps further. + return nil + } + switch n := node.(type) { + case *parser.VectorSelector: + if n.Timestamp == nil { + n.Timestamp = makeInt64Pointer(ts) + } + + case *parser.MatrixSelector: + if vs := n.VectorSelector.(*parser.VectorSelector); vs.Timestamp == nil { + vs.Timestamp = makeInt64Pointer(ts) + } + + case *parser.SubqueryExpr: + if n.Timestamp == nil { + n.Timestamp = makeInt64Pointer(ts) + } + + case *parser.Call: + _, ok := AtModifierUnsafeFunctions[n.Func.Name] + containsNonStepInvariant = containsNonStepInvariant || ok + } + return nil + }) + + if containsNonStepInvariant { + // Since the expression contains a function whose result can vary with the + // evaluation time, we cannot sanely auto-generate step-invariant test cases for it. + return nil, nil + } + + newExpr := expr.String() // With all the @ evalTime set. + additionalEvalTimes := []int64{-10 * ts, 0, ts / 5, ts, 10 * ts} + if ts == 0 { + additionalEvalTimes = []int64{-1000, -ts, 1000} + } + testCases := make([]atModifierTestCase, 0, len(additionalEvalTimes)) + for _, et := range additionalEvalTimes { + testCases = append(testCases, atModifierTestCase{ + expr: newExpr, + evalTime: timestamp.Time(et), + }) + } + + return testCases, nil +} + // exec processes a single step of the test. func (t *Test) exec(tc testCommand) error { switch cmd := tc.(type) { @@ -445,59 +514,66 @@ func (t *Test) exec(tc testCommand) error { } case *evalCmd: - q, err := t.QueryEngine().NewInstantQuery(t.storage, cmd.expr, cmd.start) + queries, err := atModifierTestCases(cmd.expr, cmd.start) if err != nil { return err } - defer q.Close() - res := q.Exec(t.context) - if res.Err != nil { - if cmd.fail { - return nil + queries = append([]atModifierTestCase{{expr: cmd.expr, evalTime: cmd.start}}, queries...) + for _, iq := range queries { + q, err := t.QueryEngine().NewInstantQuery(t.storage, iq.expr, iq.evalTime) + if err != nil { + return err + } + defer q.Close() + res := q.Exec(t.context) + if res.Err != nil { + if cmd.fail { + continue + } + return errors.Wrapf(res.Err, "error evaluating query %q (line %d)", iq.expr, cmd.line) + } + if res.Err == nil && cmd.fail { + return errors.Errorf("expected error evaluating query %q (line %d) but got none", iq.expr, cmd.line) + } + err = cmd.compareResult(res.Value) + if err != nil { + return errors.Wrapf(err, "error in %s %s", cmd, iq.expr) } - return errors.Wrapf(res.Err, "error evaluating query %q (line %d)", cmd.expr, cmd.line) - } - if res.Err == nil && cmd.fail { - return errors.Errorf("expected error evaluating query %q (line %d) but got none", cmd.expr, cmd.line) - } - - err = cmd.compareResult(res.Value) - if err != nil { - return errors.Wrapf(err, "error in %s %s", cmd, cmd.expr) - } - // Check query returns same result in range mode, - // by checking against the middle step. - q, err = t.queryEngine.NewRangeQuery(t.storage, cmd.expr, cmd.start.Add(-time.Minute), cmd.start.Add(time.Minute), time.Minute) - if err != nil { - return err - } - rangeRes := q.Exec(t.context) - if rangeRes.Err != nil { - return errors.Wrapf(rangeRes.Err, "error evaluating query %q (line %d) in range mode", cmd.expr, cmd.line) - } - defer q.Close() - if cmd.ordered { - // Ordering isn't defined for range queries. 
- return nil - } - mat := rangeRes.Value.(Matrix) - vec := make(Vector, 0, len(mat)) - for _, series := range mat { - for _, point := range series.Points { - if point.T == timeMilliseconds(cmd.start) { - vec = append(vec, Sample{Metric: series.Metric, Point: point}) - break + // Check query returns same result in range mode, + // by checking against the middle step. + q, err = t.queryEngine.NewRangeQuery(t.storage, iq.expr, iq.evalTime.Add(-time.Minute), iq.evalTime.Add(time.Minute), time.Minute) + if err != nil { + return err + } + rangeRes := q.Exec(t.context) + if rangeRes.Err != nil { + return errors.Wrapf(rangeRes.Err, "error evaluating query %q (line %d) in range mode", iq.expr, cmd.line) + } + defer q.Close() + if cmd.ordered { + // Ordering isn't defined for range queries. + continue + } + mat := rangeRes.Value.(Matrix) + vec := make(Vector, 0, len(mat)) + for _, series := range mat { + for _, point := range series.Points { + if point.T == timeMilliseconds(iq.evalTime) { + vec = append(vec, Sample{Metric: series.Metric, Point: point}) + break + } } } - } - if _, ok := res.Value.(Scalar); ok { - err = cmd.compareResult(Scalar{V: vec[0].Point.V}) - } else { - err = cmd.compareResult(vec) - } - if err != nil { - return errors.Wrapf(err, "error in %s %s (line %d) range mode", cmd, cmd.expr, cmd.line) + if _, ok := res.Value.(Scalar); ok { + err = cmd.compareResult(Scalar{V: vec[0].Point.V}) + } else { + err = cmd.compareResult(vec) + } + if err != nil { + return errors.Wrapf(err, "error in %s %s (line %d) range mode", cmd, iq.expr, cmd.line) + } + } default: @@ -524,6 +600,7 @@ func (t *Test) clear() { MaxSamples: 10000, Timeout: 100 * time.Second, NoStepSubqueryIntervalFn: func(int64) int64 { return durationMilliseconds(1 * time.Minute) }, + EnableAtModifier: true, } t.queryEngine = NewEngine(opts) @@ -633,10 +710,11 @@ func (ll *LazyLoader) clear() { ll.storage = teststorage.New(ll) opts := EngineOpts{ - Logger: nil, - Reg: nil, - MaxSamples: 10000, - Timeout: 100 * time.Second, + Logger: nil, + Reg: nil, + MaxSamples: 10000, + Timeout: 100 * time.Second, + EnableAtModifier: true, } ll.queryEngine = NewEngine(opts) @@ -701,3 +779,9 @@ func (ll *LazyLoader) Close() { ll.T.Fatalf("closing test storage: %s", err) } } + +func makeInt64Pointer(val int64) *int64 { + valp := new(int64) + *valp = val + return valp +} diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index a10178aeed6..e697d57d1e7 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -95,9 +95,11 @@ type ChunkQuerier interface { type LabelQuerier interface { // LabelValues returns all potential values for a label name. // It is not safe to use the strings beyond the lifetime of the querier. + // TODO(yeya24): support matchers or hints. LabelValues(name string) ([]string, Warnings, error) // LabelNames returns all the unique label names present in the block in sorted order. + // TODO(yeya24): support matchers or hints. LabelNames() ([]string, Warnings, error) // Close releases the resources of the Querier.
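The lexer AT token, setTimestamp, and the printer changes above are what give PromQL its new "@" modifier. Below is a minimal sketch of a query carrying the modifier round-tripping through this vendored parser package; the query string is an invented example, and actually executing such a query is further gated behind the EnableAtModifier engine option enabled in the test harness above:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/promql/parser"
    )

    func main() {
        // "@ 1609746000" pins the selector to a fixed evaluation time in unix
        // seconds; per the checks above it must come after the range, and it
        // can be combined with offset.
        expr, err := parser.ParseExpr(`rate(http_requests_total[5m] @ 1609746000 offset 10m)`)
        if err != nil {
            panic(err)
        }
        fmt.Println(expr.String()) // the printer emits the @ and offset modifiers back out
    }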
diff --git a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go index 66d2e67b9be..5d3d5a4eabf 100644 --- a/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go +++ b/vendor/github.com/prometheus/prometheus/storage/remote/queue_manager.go @@ -538,13 +538,15 @@ func (t *QueueManager) StoreSeries(series []record.RefSeries, index int) { t.seriesMtx.Lock() defer t.seriesMtx.Unlock() for _, s := range series { + // Make sure all series Refs are inserted into the seriesSegmentIndexes map for tracking. + t.seriesSegmentIndexes[s.Ref] = index + ls := processExternalLabels(s.Labels, t.externalLabels) lbls := relabel.Process(ls, t.relabelConfigs...) if len(lbls) == 0 { t.droppedSeries[s.Ref] = struct{}{} continue } - t.seriesSegmentIndexes[s.Ref] = index t.internLabels(lbls) // We should not ever be replacing a series labels in the map, but just diff --git a/vendor/github.com/prometheus/prometheus/tsdb/README.md b/vendor/github.com/prometheus/prometheus/tsdb/README.md index 248004b9d39..adbc3d4ea00 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/README.md +++ b/vendor/github.com/prometheus/prometheus/tsdb/README.md @@ -16,4 +16,5 @@ A series of blog posts explaining different components of TSDB: * [The Head Block](https://ganeshvernekar.com/blog/prometheus-tsdb-the-head-block/) * [WAL and Checkpoint](https://ganeshvernekar.com/blog/prometheus-tsdb-wal-and-checkpoint/) * [Memory Mapping of Head Chunks from Disk](https://ganeshvernekar.com/blog/prometheus-tsdb-mmapping-head-chunks-from-disk/) -* [Persistent Block and its Index](https://ganeshvernekar.com/blog/prometheus-tsdb-persistent-block-and-its-index/) \ No newline at end of file +* [Persistent Block and its Index](https://ganeshvernekar.com/blog/prometheus-tsdb-persistent-block-and-its-index/) +* [Queries](https://ganeshvernekar.com/blog/prometheus-tsdb-queries/) \ No newline at end of file diff --git a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go index 0cd05eb77fd..baf87f14008 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go @@ -40,6 +40,9 @@ type BlockWriter struct { chunkDir string } +// ErrNoSeriesAppended is returned if the series count is zero while flushing blocks. +var ErrNoSeriesAppended error = errors.New("no series appended, aborting") + // NewBlockWriter creates a new block writer. // // The returned writer accumulates all the series in the Head block until `Flush` is called. @@ -88,7 +91,7 @@ func (w *BlockWriter) Appender(ctx context.Context) storage.Appender { func (w *BlockWriter) Flush(ctx context.Context) (ulid.ULID, error) { seriesCount := w.head.NumSeries() if w.head.NumSeries() == 0 { - return ulid.ULID{}, errors.New("no series appended, aborting") + return ulid.ULID{}, ErrNoSeriesAppended } mint := w.head.MinTime() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go index d82c12d33f0..051b9b1a89f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go @@ -70,7 +70,7 @@ const ( DefaultWriteBufferSize = 4 * 1024 * 1024 // 4 MiB. ) -// corruptionErr is an error that's returned when corruption is encountered. 
+// CorruptionErr is an error that's returned when corruption is encountered. type CorruptionErr struct { Dir string FileIndex int diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 7c6e142166f..8a6f670c4d0 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -214,6 +214,11 @@ func (c *LeveledCompactor) plan(dms []dirMeta) ([]string, error) { for i := len(dms) - 1; i >= 0; i-- { meta := dms[i].meta if meta.MaxTime-meta.MinTime < c.ranges[len(c.ranges)/2] { + // If the block is entirely deleted, then we don't care about the block being big enough. + // TODO: This is assuming a single tombstone is for a distinct series, which might not be true. + if meta.Stats.NumTombstones > 0 && meta.Stats.NumTombstones >= meta.Stats.NumSeries { + return []string{dms[i].dir}, nil + } break } if float64(meta.Stats.NumTombstones)/float64(meta.Stats.NumSeries+1) > 0.05 { @@ -678,7 +683,7 @@ func (c *LeveledCompactor) populateBlock(blocks []BlockReader, meta *BlockMeta, if i > 0 && b.Meta().MinTime < globalMaxt { c.metrics.overlappingBlocks.Inc() overlapping = true - level.Warn(c.logger).Log("msg", "Found overlapping blocks during compaction", "ulid", meta.ULID) + level.Info(c.logger).Log("msg", "Found overlapping blocks during compaction", "ulid", meta.ULID) } if b.Meta().MaxTime > globalMaxt { globalMaxt = b.Meta().MaxTime diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index f63437fc6ae..3ddf73d21a5 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -22,7 +22,6 @@ import ( "math" "os" "path/filepath" - "runtime" "sort" "strconv" "strings" @@ -57,6 +56,8 @@ const ( // about removing those too on start to save space. Currently only blocks tmp dirs are removed. tmpForDeletionBlockDirSuffix = ".tmp-for-deletion" tmpForCreationBlockDirSuffix = ".tmp-for-creation" + // Pre-2.21 tmp dir suffix, used in clean-up functions. + tmpLegacy = ".tmp" ) var ( @@ -733,6 +734,12 @@ func (db *DB) run() { select { case <-time.After(1 * time.Minute): + db.cmtx.Lock() + if err := db.reloadBlocks(); err != nil { + level.Error(db.logger).Log("msg", "reloadBlocks", "err", err) + } + db.cmtx.Unlock() + select { case db.compactc <- struct{}{}: default: @@ -805,6 +812,7 @@ func (db *DB) Compact() (returnErr error) { ).Err() }() + start := time.Now() // Check whether we have pending head blocks that are ready to be persisted. // They have the highest priority. 
for { @@ -839,6 +847,14 @@ func (db *DB) Compact() (returnErr error) { return errors.Wrap(err, "WAL truncation in Compact") } + compactionDuration := time.Since(start) + if compactionDuration.Milliseconds() > db.head.chunkRange.Load() { + level.Warn(db.logger).Log( + "msg", "Head compaction took longer than the block time range, compactions are falling behind and won't be able to catch up", + "duration", compactionDuration.String(), + "block_range", db.head.chunkRange.Load(), + ) + } return db.compactBlocks() } @@ -865,7 +881,6 @@ func (db *DB) compactHead(head *RangeHead) error { return errors.Wrap(err, "persist head block") } - runtime.GC() if err := db.reloadBlocks(); err != nil { if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil { return tsdb_errors.NewMulti( @@ -878,7 +893,6 @@ if err = db.head.truncateMemory(head.BlockMaxTime()); err != nil { return errors.Wrap(err, "head memory truncate") } - runtime.GC() return nil } @@ -905,7 +919,6 @@ func (db *DB) compactBlocks() (err error) { if err != nil { return errors.Wrapf(err, "compact %s", plan) } - runtime.GC() if err := db.reloadBlocks(); err != nil { if err := os.RemoveAll(filepath.Join(db.dir, uid.String())); err != nil { @@ -913,7 +926,6 @@ } return errors.Wrap(err, "reloadBlocks blocks") } - runtime.GC() } return nil @@ -1560,7 +1572,7 @@ func isTmpBlockDir(fi os.FileInfo) bool { fn := fi.Name() ext := filepath.Ext(fn) - if ext == tmpForDeletionBlockDirSuffix || ext == tmpForCreationBlockDirSuffix { + if ext == tmpForDeletionBlockDirSuffix || ext == tmpForCreationBlockDirSuffix || ext == tmpLegacy { if _, err := ulid.ParseStrict(fn[:len(fn)-len(ext)]); err == nil { return true } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 342073dd6c2..bf54511a826 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -820,9 +820,22 @@ func (h *Head) truncateMemory(mint int64) (err error) { h.metrics.headTruncateTotal.Inc() start := time.Now() - h.gc() + actualMint := h.gc() level.Info(h.logger).Log("msg", "Head GC completed", "duration", time.Since(start)) h.metrics.gcDuration.Observe(time.Since(start).Seconds()) + if actualMint > h.minTime.Load() { + // The actual mint of the Head is higher than the one asked to truncate. + appendableMinValidTime := h.appendableMinValidTime() + if actualMint < appendableMinValidTime { + h.minTime.Store(actualMint) + h.minValidTime.Store(actualMint) + } else { + // The actual min time is in the appendable window. + // So we set the mint to the appendableMinValidTime. + h.minTime.Store(appendableMinValidTime) + h.minValidTime.Store(appendableMinValidTime) + } + } // Truncate the chunk m-mapper. if err := h.chunkDiskMapper.Truncate(mint); err != nil { @@ -997,6 +1010,13 @@ func (h *RangeHead) Meta() BlockMeta { } } +// String returns a human-readable representation of the range head. It's important to +// keep this function in order to avoid the struct dump when the head is stringified in +// errors or logs. +func (h *RangeHead) String() string { + return fmt.Sprintf("range head (mint: %d, maxt: %d)", h.MinTime(), h.MaxTime()) +} + // initAppender is a helper to initialize the time bounds of the head // upon the first sample it receives. 
type initAppender struct { @@ -1054,10 +1074,8 @@ func (h *Head) appender() *headAppender { cleanupAppendIDsBelow := h.iso.lowWatermark() return &headAppender{ - head: h, - // Set the minimum valid time to whichever is greater the head min valid time or the compaction window. - // This ensures that no samples will be added within the compaction window to avoid races. - minValidTime: max(h.minValidTime.Load(), h.MaxTime()-h.chunkRange.Load()/2), + head: h, + minValidTime: h.appendableMinValidTime(), mint: math.MaxInt64, maxt: math.MinInt64, samples: h.getAppendBuffer(), @@ -1067,6 +1085,12 @@ } } +func (h *Head) appendableMinValidTime() int64 { + // Setting the minimum valid time to whichever is greater, the head min valid time or the compaction window, + // ensures that no samples will be added within the compaction window to avoid races. + return max(h.minValidTime.Load(), h.MaxTime()-h.chunkRange.Load()/2) +} + func max(a, b int64) int64 { if a > b { return a @@ -1335,13 +1359,14 @@ func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error { } // gc removes data before the minimum timestamp from the head. -func (h *Head) gc() { +// It returns the actual min time of the chunks present in the Head. +func (h *Head) gc() int64 { // Only data strictly lower than this timestamp must be deleted. mint := h.MinTime() // Drop old chunks and remember series IDs and hashes if they can be // deleted entirely. - deleted, chunksRemoved := h.series.gc(mint) + deleted, chunksRemoved, actualMint := h.series.gc(mint) seriesRemoved := len(deleted) h.metrics.seriesRemoved.Add(float64(seriesRemoved)) @@ -1382,6 +1407,8 @@ panic(err) } h.symbols = symbols + + return actualMint } // Tombstones returns a new reader over the head's tombstones @@ -1472,6 +1499,13 @@ func (h *Head) Close() error { return errs.Err() } +// String returns a human-readable representation of the TSDB head. It's important to +// keep this function in order to avoid the struct dump when the head is stringified in +// errors or logs. +func (h *Head) String() string { + return "head" +} + type headChunkReader struct { head *Head mint, maxt int64 @@ -1813,11 +1847,12 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st // gc garbage collects old chunks that are strictly before mint and removes // series entirely that have no chunks left. -func (s *stripeSeries) gc(mint int64) (map[uint64]struct{}, int) { +func (s *stripeSeries) gc(mint int64) (map[uint64]struct{}, int, int64) { var ( - deleted = map[uint64]struct{}{} - deletedForCallback = []labels.Labels{} - rmChunks = 0 + deleted = map[uint64]struct{}{} + deletedForCallback = []labels.Labels{} + rmChunks = 0 + actualMint int64 = math.MaxInt64 ) // Run through all series and truncate old chunks. Mark those with no // chunks left as deleted and store their ID. 
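To make appendableMinValidTime concrete: the head refuses appends older than half a chunk range behind its newest sample, so truncation and compaction cannot race with late writes. A self-contained sketch with made-up timestamps (not values taken from this patch):

    package main

    import "fmt"

    // max mirrors the helper in head.go above.
    func max(a, b int64) int64 {
        if a > b {
            return a
        }
        return b
    }

    func main() {
        var (
            minValidTime int64 = 1_000_000 // head's current min valid time (ms)
            maxTime      int64 = 1_007_200 // newest sample in the head (ms)
            chunkRange   int64 = 7_200     // block time range (ms)
        )
        // Whichever is greater wins; here the compaction window boundary.
        fmt.Println(max(minValidTime, maxTime-chunkRange/2)) // 1003600
    }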
@@ -1830,6 +1865,10 @@ func (s *stripeSeries) gc(mint int64) (map[uint64]struct{}, int) { rmChunks += series.truncateChunksBefore(mint) if len(series.mmappedChunks) > 0 || series.headChunk != nil || series.pendingCommit { + seriesMint := series.minTime() + if seriesMint < actualMint { + actualMint = seriesMint + } series.Unlock() continue } @@ -1864,7 +1903,11 @@ func (s *stripeSeries) gc(mint int64) (map[uint64]struct{}, int) { deletedForCallback = deletedForCallback[:0] } - return deleted, rmChunks + if actualMint == math.MaxInt64 { + actualMint = mint + } + + return deleted, rmChunks, actualMint } func (s *stripeSeries) getByID(id uint64) *memSeries { @@ -2086,26 +2129,25 @@ func (s *memSeries) chunkID(pos int) int { // have no timestamp at or after mint. // Chunk IDs remain unchanged. func (s *memSeries) truncateChunksBefore(mint int64) (removed int) { - var k int if s.headChunk != nil && s.headChunk.maxTime < mint { // If head chunk is truncated, we can truncate all mmapped chunks. - k = 1 + len(s.mmappedChunks) - s.firstChunkID += k + removed = 1 + len(s.mmappedChunks) + s.firstChunkID += removed s.headChunk = nil s.mmappedChunks = nil - return k + return removed } if len(s.mmappedChunks) > 0 { for i, c := range s.mmappedChunks { if c.maxTime >= mint { break } - k = i + 1 + removed = i + 1 } - s.mmappedChunks = append(s.mmappedChunks[:0], s.mmappedChunks[k:]...) - s.firstChunkID += k + s.mmappedChunks = append(s.mmappedChunks[:0], s.mmappedChunks[removed:]...) + s.firstChunkID += removed } - return k + return removed } // append adds the sample (t, v) to the series. The caller also has to provide diff --git a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go index 055f74118e4..a9048e4c62a 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/index/postings.go @@ -115,6 +115,7 @@ type PostingsStats struct { CardinalityLabelStats []Stat LabelValueStats []Stat LabelValuePairsStats []Stat + NumLabelPairs int } // Stats calculates the cardinality statistics from postings. 
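The NumLabelPairs field added to PostingsStats above is accumulated in the following hunk as the total number of distinct label name/value pairs held in the postings. A hedged sketch of what it counts, assuming this vendored tsdb/index package and invented series:

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/pkg/labels"
        "github.com/prometheus/prometheus/tsdb/index"
    )

    func main() {
        p := index.NewMemPostings()
        p.Add(1, labels.FromStrings("__name__", "up", "job", "node"))
        p.Add(2, labels.FromStrings("__name__", "up", "job", "prometheus"))

        // Distinct pairs: __name__=up, job=node, job=prometheus => 3.
        fmt.Println(p.Stats("job").NumLabelPairs)
    }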
@@ -128,6 +129,7 @@ func (p *MemPostings) Stats(label string) *PostingsStats { labels := &maxHeap{} labelValueLength := &maxHeap{} labelValuePairs := &maxHeap{} + numLabelPairs := 0 metrics.init(maxNumOfRecords) labels.init(maxNumOfRecords) @@ -139,6 +141,7 @@ func (p *MemPostings) Stats(label string) *PostingsStats { continue } labels.push(Stat{Name: n, Count: uint64(len(e))}) + numLabelPairs += len(e) size = 0 for name, values := range e { if n == label { @@ -157,6 +160,7 @@ func (p *MemPostings) Stats(label string) *PostingsStats { CardinalityLabelStats: labels.get(), LabelValueStats: labelValueLength.get(), LabelValuePairsStats: labelValuePairs.get(), + NumLabelPairs: numLabelPairs, } } diff --git a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go index 45669f69cb7..9963f52ec48 100644 --- a/vendor/github.com/prometheus/prometheus/web/api/v1/api.go +++ b/vendor/github.com/prometheus/prometheus/web/api/v1/api.go @@ -330,6 +330,12 @@ type queryData struct { Stats *stats.QueryStats `json:"stats,omitempty"` } +func invalidParamError(err error, parameter string) apiFuncResult { + return apiFuncResult{nil, &apiError{ + errorBadData, errors.Wrapf(err, "invalid parameter %q", parameter), + }, nil, nil} +} + func (api *API) options(r *http.Request) apiFuncResult { return apiFuncResult{nil, nil, nil, nil} } @@ -337,15 +343,14 @@ func (api *API) options(r *http.Request) apiFuncResult { func (api *API) query(r *http.Request) (result apiFuncResult) { ts, err := parseTimeParam(r, "time", api.now()) if err != nil { - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(err, "time") } ctx := r.Context() if to := r.FormValue("timeout"); to != "" { var cancel context.CancelFunc timeout, err := parseDuration(to) if err != nil { - err = errors.Wrapf(err, "invalid parameter 'timeout'") - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(err, "timeout") } ctx, cancel = context.WithTimeout(ctx, timeout) @@ -354,9 +359,9 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { qry, err := api.QueryEngine.NewInstantQuery(api.Queryable, r.FormValue("query"), ts) if err != nil { - err = errors.Wrapf(err, "invalid parameter 'query'") - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(err, "query") } + // From now on, we must only return with a finalizer in the result (to // be called by the caller) or call qry.Close ourselves (which is // required in the case of a panic). 
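The new invalidParamError helper centralizes the errors.Wrapf boilerplate that the handlers below previously repeated inline. The wrapping it performs reduces to the following; the parameter name and message are illustrative only:

    package main

    import (
        "fmt"

        "github.com/pkg/errors"
    )

    func main() {
        base := errors.New(`cannot parse "abc" to a valid timestamp`)
        // Mirrors errors.Wrapf(err, "invalid parameter %q", parameter) above.
        wrapped := errors.Wrapf(base, "invalid parameter %q", "time")
        fmt.Println(wrapped) // invalid parameter "time": cannot parse "abc" to a valid timestamp
    }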
@@ -389,28 +394,23 @@ func (api *API) query(r *http.Request) (result apiFuncResult) { func (api *API) queryRange(r *http.Request) (result apiFuncResult) { start, err := parseTime(r.FormValue("start")) if err != nil { - err = errors.Wrapf(err, "invalid parameter 'start'") - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(err, "start") } end, err := parseTime(r.FormValue("end")) if err != nil { - err = errors.Wrapf(err, "invalid parameter 'end'") - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(err, "end") } if end.Before(start) { - err := errors.New("end timestamp must not be before start time") - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(errors.New("end timestamp must not be before start time"), "end") } step, err := parseDuration(r.FormValue("step")) if err != nil { - err = errors.Wrapf(err, "invalid parameter 'step'") - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(err, "step") } if step <= 0 { - err := errors.New("zero or negative query resolution step widths are not accepted. Try a positive integer") - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(errors.New("zero or negative query resolution step widths are not accepted. Try a positive integer"), "step") } // For safety, limit the number of returned points per timeseries. @@ -425,8 +425,7 @@ func (api *API) queryRange(r *http.Request) (result apiFuncResult) { var cancel context.CancelFunc timeout, err := parseDuration(to) if err != nil { - err = errors.Wrap(err, "invalid parameter 'timeout'") - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(err, "timeout") } ctx, cancel = context.WithTimeout(ctx, timeout) @@ -486,11 +485,16 @@ func returnAPIError(err error) *apiError { func (api *API) labelNames(r *http.Request) apiFuncResult { start, err := parseTimeParam(r, "start", minTime) if err != nil { - return apiFuncResult{nil, &apiError{errorBadData, errors.Wrap(err, "invalid parameter 'start'")}, nil, nil} + return invalidParamError(err, "start") } end, err := parseTimeParam(r, "end", maxTime) if err != nil { - return apiFuncResult{nil, &apiError{errorBadData, errors.Wrap(err, "invalid parameter 'end'")}, nil, nil} + return invalidParamError(err, "end") + } + + matcherSets, err := parseMatchersParam(r.Form["match[]"]) + if err != nil { + return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} } q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end)) @@ -499,10 +503,46 @@ func (api *API) labelNames(r *http.Request) apiFuncResult { } defer q.Close() - names, warnings, err := q.LabelNames() - if err != nil { - return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil} + var ( + names []string + warnings storage.Warnings + ) + if len(matcherSets) > 0 { + hints := &storage.SelectHints{ + Start: timestamp.FromTime(start), + End: timestamp.FromTime(end), + Func: "series", // There is no series function, this token is used for lookups that don't need samples. + } + + labelNamesSet := make(map[string]struct{}) + // Get all series which match matchers. + for _, mset := range matcherSets { + s := q.Select(false, hints, mset...) + for s.Next() { + series := s.At() + for _, lb := range series.Labels() { + labelNamesSet[lb.Name] = struct{}{} + } + } + warnings = append(warnings, s.Warnings()...) 
+ if err := s.Err(); err != nil { + return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil} + } + } + + // Convert the map to an array. + names = make([]string, 0, len(labelNamesSet)) + for key := range labelNamesSet { + names = append(names, key) + } + sort.Strings(names) + } else { + names, warnings, err = q.LabelNames() + if err != nil { + return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil} + } } + if names == nil { names = []string{} } @@ -519,11 +559,16 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { start, err := parseTimeParam(r, "start", minTime) if err != nil { - return apiFuncResult{nil, &apiError{errorBadData, errors.Wrap(err, "invalid parameter 'start'")}, nil, nil} + return invalidParamError(err, "start") } end, err := parseTimeParam(r, "end", maxTime) if err != nil { - return apiFuncResult{nil, &apiError{errorBadData, errors.Wrap(err, "invalid parameter 'end'")}, nil, nil} + return invalidParamError(err, "end") + } + + matcherSets, err := parseMatchersParam(r.Form["match[]"]) + if err != nil { + return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} } q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end)) @@ -542,10 +587,49 @@ func (api *API) labelValues(r *http.Request) (result apiFuncResult) { q.Close() } - vals, warnings, err := q.LabelValues(name) - if err != nil { - return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer} + var ( + vals []string + warnings storage.Warnings + ) + if len(matcherSets) > 0 { + hints := &storage.SelectHints{ + Start: timestamp.FromTime(start), + End: timestamp.FromTime(end), + Func: "series", // There is no series function, this token is used for lookups that don't need samples. + } + + labelValuesSet := make(map[string]struct{}) + // Get all series which match matchers. + for _, mset := range matcherSets { + s := q.Select(false, hints, mset...) + for s.Next() { + series := s.At() + labelValue := series.Labels().Get(name) + // Filter out empty value. + if labelValue == "" { + continue + } + labelValuesSet[labelValue] = struct{}{} + } + warnings = append(warnings, s.Warnings()...) + if err := s.Err(); err != nil { + return apiFuncResult{nil, &apiError{errorExec, err}, warnings, nil} + } + } + + // Convert the map to an array. 
+ vals = make([]string, 0, len(labelValuesSet)) + for key := range labelValuesSet { + vals = append(vals, key) + } + sort.Strings(vals) + } else { + vals, warnings, err = q.LabelValues(name) + if err != nil { + return apiFuncResult{nil, &apiError{errorExec, err}, warnings, closer} + } } + if vals == nil { vals = []string{} } @@ -571,20 +655,16 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { start, err := parseTimeParam(r, "start", minTime) if err != nil { - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(err, "start") } end, err := parseTimeParam(r, "end", maxTime) if err != nil { - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(err, "end") } - var matcherSets [][]*labels.Matcher - for _, s := range r.Form["match[]"] { - matchers, err := parser.ParseMetricSelector(s) - if err != nil { - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} - } - matcherSets = append(matcherSets, matchers) + matcherSets, err := parseMatchersParam(r.Form["match[]"]) + if err != nil { + return invalidParamError(err, "match[]") } q, err := api.Queryable.Querier(r.Context(), timestamp.FromTime(start), timestamp.FromTime(end)) @@ -630,7 +710,7 @@ func (api *API) series(r *http.Request) (result apiFuncResult) { return apiFuncResult{metrics, nil, warnings, closer} } -func (api *API) dropSeries(r *http.Request) apiFuncResult { +func (api *API) dropSeries(_ *http.Request) apiFuncResult { return apiFuncResult{nil, &apiError{errorInternal, errors.New("not implemented")}, nil, nil} } @@ -808,20 +888,16 @@ func (api *API) targetMetadata(r *http.Request) apiFuncResult { } matchTarget := r.FormValue("match_target") - var matchers []*labels.Matcher - var err error - if matchTarget != "" { matchers, err = parser.ParseMetricSelector(matchTarget) if err != nil { - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(err, "match_target") } } metric := r.FormValue("metric") - res := []metricMetadata{} for _, tt := range api.targetRetriever(r.Context()).TargetsActive() { for _, t := range tt { @@ -955,7 +1031,6 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { } metric := r.FormValue("metric") - for _, tt := range api.targetRetriever(r.Context()).TargetsActive() { for _, t := range tt { @@ -988,7 +1063,6 @@ func (api *API) metricMetadata(r *http.Request) apiFuncResult { // Put the elements from the pseudo-set into a slice for marshaling. 
res := map[string][]metadata{} - for name, set := range metrics { if limit >= 0 && len(res) >= limit { break @@ -1056,15 +1130,14 @@ type recordingRule struct { func (api *API) rules(r *http.Request) apiFuncResult { ruleGroups := api.rulesRetriever(r.Context()).RuleGroups() res := &RuleDiscovery{RuleGroups: make([]*RuleGroup, len(ruleGroups))} - typeParam := strings.ToLower(r.URL.Query().Get("type")) + typ := strings.ToLower(r.URL.Query().Get("type")) - if typeParam != "" && typeParam != "alert" && typeParam != "record" { - err := errors.Errorf("invalid query parameter type='%v'", typeParam) - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + if typ != "" && typ != "alert" && typ != "record" { + return invalidParamError(errors.Errorf("not supported value %q", typ), "type") } - returnAlerts := typeParam == "" || typeParam == "alert" - returnRecording := typeParam == "" || typeParam == "record" + returnAlerts := typ == "" || typ == "alert" + returnRecording := typ == "" || typ == "record" for i, grp := range ruleGroups { apiRuleGroup := &RuleGroup{ @@ -1132,7 +1205,7 @@ type prometheusConfig struct { YAML string `json:"yaml"` } -func (api *API) serveRuntimeInfo(r *http.Request) apiFuncResult { +func (api *API) serveRuntimeInfo(_ *http.Request) apiFuncResult { status, err := api.runtimeInfo() if err != nil { return apiFuncResult{status, &apiError{errorInternal, err}, nil, nil} @@ -1140,18 +1213,18 @@ func (api *API) serveRuntimeInfo(r *http.Request) apiFuncResult { return apiFuncResult{status, nil, nil, nil} } -func (api *API) serveBuildInfo(r *http.Request) apiFuncResult { +func (api *API) serveBuildInfo(_ *http.Request) apiFuncResult { return apiFuncResult{api.buildInfo, nil, nil, nil} } -func (api *API) serveConfig(r *http.Request) apiFuncResult { +func (api *API) serveConfig(_ *http.Request) apiFuncResult { cfg := &prometheusConfig{ YAML: api.config().String(), } return apiFuncResult{cfg, nil, nil, nil} } -func (api *API) serveFlags(r *http.Request) apiFuncResult { +func (api *API) serveFlags(_ *http.Request) apiFuncResult { return apiFuncResult{api.flagsMap, nil, nil, nil} } @@ -1163,10 +1236,11 @@ type stat struct { // HeadStats has information about the TSDB head. type HeadStats struct { - NumSeries uint64 `json:"numSeries"` - ChunkCount int64 `json:"chunkCount"` - MinTime int64 `json:"minTime"` - MaxTime int64 `json:"maxTime"` + NumSeries uint64 `json:"numSeries"` + NumLabelPairs int `json:"numLabelPairs"` + ChunkCount int64 `json:"chunkCount"` + MinTime int64 `json:"minTime"` + MaxTime int64 `json:"maxTime"` } // tsdbStatus has information of cardinality statistics from postings. 
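The rules handler change above validates an optional ?type= parameter, which must be "", "alert", or "record", and derives two booleans that gate which rule kinds are returned. A dependency-free sketch of that validation (ruleFilter is an illustrative name, not the handler's actual shape):

    package main

    import (
        "fmt"
        "strings"
    )

    func ruleFilter(typeParam string) (returnAlerts, returnRecording bool, err error) {
        typ := strings.ToLower(typeParam)
        if typ != "" && typ != "alert" && typ != "record" {
            return false, false, fmt.Errorf("invalid parameter 'type': not supported value %q", typ)
        }
        // An empty type means "both kinds".
        return typ == "" || typ == "alert", typ == "" || typ == "record", nil
    }

    func main() {
        a, r, _ := ruleFilter("")
        fmt.Println(a, r) // true true
        _, _, err := ruleFilter("foo")
        fmt.Println(err) // invalid parameter 'type': not supported value "foo"
    }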
@@ -1208,10 +1282,11 @@ func (api *API) serveTSDBStatus(*http.Request) apiFuncResult { } return apiFuncResult{tsdbStatus{ HeadStats: HeadStats{ - NumSeries: s.NumSeries, - ChunkCount: chunkCount, - MinTime: s.MinTime, - MaxTime: s.MaxTime, + NumSeries: s.NumSeries, + ChunkCount: chunkCount, + MinTime: s.MinTime, + MaxTime: s.MaxTime, + NumLabelPairs: s.IndexPostingStats.NumLabelPairs, }, SeriesCountByMetricName: convertStats(s.IndexPostingStats.CardinalityMetricsStats), LabelValueCountByLabelName: convertStats(s.IndexPostingStats.CardinalityLabelStats), @@ -1441,17 +1516,17 @@ func (api *API) deleteSeries(r *http.Request) apiFuncResult { start, err := parseTimeParam(r, "start", minTime) if err != nil { - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(err, "start") } end, err := parseTimeParam(r, "end", maxTime) if err != nil { - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(err, "end") } for _, s := range r.Form["match[]"] { matchers, err := parser.ParseMetricSelector(s) if err != nil { - return apiFuncResult{nil, &apiError{errorBadData, err}, nil, nil} + return invalidParamError(err, "match[]") } if err := api.db.Delete(timestamp.FromTime(start), timestamp.FromTime(end), matchers...); err != nil { return apiFuncResult{nil, &apiError{errorInternal, err}, nil, nil} @@ -1472,7 +1547,7 @@ func (api *API) snapshot(r *http.Request) apiFuncResult { if r.FormValue("skip_head") != "" { skipHead, err = strconv.ParseBool(r.FormValue("skip_head")) if err != nil { - return apiFuncResult{nil, &apiError{errorBadData, errors.Wrapf(err, "unable to parse boolean 'skip_head' argument")}, nil, nil} + return invalidParamError(errors.Wrapf(err, "unable to parse boolean"), "skip_head") } } @@ -1618,6 +1693,28 @@ func parseDuration(s string) (time.Duration, error) { return 0, errors.Errorf("cannot parse %q to a valid duration", s) } +func parseMatchersParam(matchers []string) ([][]*labels.Matcher, error) { + var matcherSets [][]*labels.Matcher + for _, s := range matchers { + matchers, err := parser.ParseMetricSelector(s) + if err != nil { + return nil, err + } + matcherSets = append(matcherSets, matchers) + } + +OUTER: + for _, ms := range matcherSets { + for _, lm := range ms { + if lm != nil && !lm.Matches("") { + continue OUTER + } + } + return nil, errors.New("match[] must contain at least one non-empty matcher") + } + return matcherSets, nil +} + func marshalPointJSON(ptr unsafe.Pointer, stream *jsoniter.Stream) { p := *((*promql.Point)(ptr)) stream.WriteArrayStart() diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/block.go b/vendor/github.com/thanos-io/thanos/pkg/block/block.go index 33993bd446b..5ab3f7ea85d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/block.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/block.go @@ -98,12 +98,12 @@ func Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir st return errors.Wrap(err, "gather meta file stats") } - metaEncoded := bytes.Buffer{} + metaEncoded := strings.Builder{} if err := meta.Write(&metaEncoded); err != nil { return errors.Wrap(err, "encode meta file") } - if err := bkt.Upload(ctx, path.Join(DebugMetas, fmt.Sprintf("%s.json", id)), bytes.NewReader(metaEncoded.Bytes())); err != nil { + if err := bkt.Upload(ctx, path.Join(DebugMetas, fmt.Sprintf("%s.json", id)), strings.NewReader(metaEncoded.String())); err != nil { return cleanUp(logger, bkt, id, errors.Wrap(err, "upload debug meta file")) } @@ -116,8 +116,12 @@ func 
Upload(ctx context.Context, logger log.Logger, bkt objstore.Bucket, bdir st } // Meta.json always need to be uploaded as a last item. This will allow to assume block directories without meta file to be pending uploads. - if err := bkt.Upload(ctx, path.Join(id.String(), MetaFilename), &metaEncoded); err != nil { - return cleanUp(logger, bkt, id, errors.Wrap(err, "upload meta file")) + if err := bkt.Upload(ctx, path.Join(id.String(), MetaFilename), strings.NewReader(metaEncoded.String())); err != nil { + // Don't call cleanUp here. Despite getting error, meta.json may have been uploaded in certain cases, + // and even though cleanUp will not see it yet, meta.json may appear in the bucket later. + // (Eg. S3 is known to behave this way when it returns 503 "SlowDown" error). + // If meta.json is not uploaded, this will produce partial blocks, but such blocks will be cleaned later. + return errors.Wrap(err, "upload meta file") } return nil @@ -164,12 +168,15 @@ func MarkForDeletion(ctx context.Context, logger log.Logger, bkt objstore.Bucket // Delete removes directory that is meant to be block directory. // NOTE: Always prefer this method for deleting blocks. -// * We have to delete block's files in the certain order (meta.json first) +// * We have to delete block's files in the certain order (meta.json first and deletion-mark.json last) // to ensure we don't end up with malformed partial blocks. Thanos system handles well partial blocks // only if they don't have meta.json. If meta.json is present Thanos assumes valid block. // * This avoids deleting empty dir (whole bucket) by mistake. func Delete(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid.ULID) error { metaFile := path.Join(id.String(), MetaFilename) + deletionMarkFile := path.Join(id.String(), metadata.DeletionMarkFilename) + + // Delete block meta file. ok, err := bkt.Exists(ctx, metaFile) if err != nil { return errors.Wrapf(err, "stat %s", metaFile) @@ -182,10 +189,30 @@ func Delete(ctx context.Context, logger log.Logger, bkt objstore.Bucket, id ulid level.Debug(logger).Log("msg", "deleted file", "file", metaFile, "bucket", bkt.Name()) } - // Delete the bucket, but skip the metaFile as we just deleted that. This is required for eventual object storages (list after write). - return deleteDirRec(ctx, logger, bkt, id.String(), func(name string) bool { - return name == metaFile + // Delete the block objects, but skip: + // - The metaFile as we just deleted. This is required for eventual object storages (list after write). + // - The deletionMarkFile as we'll delete it at last. + err = deleteDirRec(ctx, logger, bkt, id.String(), func(name string) bool { + return name == metaFile || name == deletionMarkFile }) + if err != nil { + return err + } + + // Delete block deletion mark. + ok, err = bkt.Exists(ctx, deletionMarkFile) + if err != nil { + return errors.Wrapf(err, "stat %s", deletionMarkFile) + } + + if ok { + if err := bkt.Delete(ctx, deletionMarkFile); err != nil { + return errors.Wrapf(err, "delete %s", deletionMarkFile) + } + level.Debug(logger).Log("msg", "deleted file", "file", deletionMarkFile, "bucket", bkt.Name()) + } + + return nil } // deleteDirRec removes all objects prefixed with dir from the bucket. It skips objects that return true for the passed keep function. 
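The Delete change above enforces a strict ordering so readers never observe a half-deleted block that still looks valid: meta.json is removed first (a block without meta.json is treated as a pending upload), the bulk of the objects next, and deletion-mark.json last. A dependency-free sketch over a toy in-memory bucket; the bucket type and deleteBlock name are illustrative only:

    package main

    import (
        "fmt"
        "strings"
    )

    type bucket map[string]struct{} // object name -> exists

    func deleteBlock(b bucket, id string) {
        meta := id + "/meta.json"
        mark := id + "/deletion-mark.json"

        // 1. Remove meta.json first: a block without meta.json is treated as
        //    a pending/partial upload and ignored by readers.
        delete(b, meta)

        // 2. Remove the remaining objects, skipping the two special files
        //    (meta.json is already gone; the mark must outlive everything else).
        for name := range b {
            if name == meta || name == mark {
                continue
            }
            if strings.HasPrefix(name, id+"/") {
                delete(b, name)
            }
        }

        // 3. Remove deletion-mark.json last, once the block is otherwise empty.
        delete(b, mark)
    }

    func main() {
        b := bucket{
            "01ABC/meta.json":          {},
            "01ABC/index":              {},
            "01ABC/chunks/000001":      {},
            "01ABC/deletion-mark.json": {},
        }
        deleteBlock(b, "01ABC")
        fmt.Println(len(b)) // 0
    }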
diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go index 3a05e97eb34..fd0abe3b489 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/fetcher.go @@ -35,6 +35,8 @@ import ( "github.com/thanos-io/thanos/pkg/runutil" ) +const FetcherConcurrency = 32 + type fetcherMetrics struct { syncs prometheus.Counter syncFailures prometheus.Counter @@ -301,6 +303,7 @@ func (f *BaseFetcher) fetchMetadata(ctx context.Context) (interface{}, error) { ch = make(chan ulid.ULID, f.concurrency) mtx sync.Mutex ) + level.Debug(f.logger).Log("msg", "fetching meta data", "concurrency", f.concurrency) for i := 0; i < f.concurrency; i++ { eg.Go(func() error { for id := range ch { diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go index 20ae1c5bc92..ccbf2f1b496 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/binary_reader.go @@ -15,6 +15,7 @@ import ( "os" "path/filepath" "sort" + "sync" "time" "unsafe" @@ -418,6 +419,8 @@ type postingOffset struct { tableOff int } +const valueSymbolsCacheSize = 1024 + type BinaryReader struct { b index.ByteSlice toc *BinaryTOC @@ -432,9 +435,17 @@ type BinaryReader struct { postingsV1 map[string]map[string]index.Range // Symbols struct that keeps only 1/postingOffsetsInMemSampling in the memory, then looks up the rest via mmap. - symbols *index.Symbols - nameSymbols map[uint32]string // Cache of the label name symbol lookups, + symbols *index.Symbols + // Cache of the label name symbol lookups, // as there are not many and they are half of all lookups. + nameSymbols map[uint32]string + // Direct cache of values. This is much faster than an LRU cache and still provides + // a reasonable cache hit ratio. + valueSymbolsMx sync.Mutex + valueSymbols [valueSymbolsCacheSize]struct { + index uint32 + symbol string + } dec *index.Decoder @@ -637,12 +648,12 @@ func newBinaryTOCFromByteSlice(bs index.ByteSlice) (*BinaryTOC, error) { }, nil } -func (r BinaryReader) IndexVersion() (int, error) { +func (r *BinaryReader) IndexVersion() (int, error) { return r.indexVersion, nil } // TODO(bwplotka): Get advantage of multi value offset fetch. 
-func (r BinaryReader) PostingsOffset(name string, value string) (index.Range, error) { +func (r *BinaryReader) PostingsOffset(name string, value string) (index.Range, error) { rngs, err := r.postingsOffset(name, value) if err != nil { return index.Range{}, err @@ -665,7 +676,7 @@ func skipNAndName(d *encoding.Decbuf, buf *int) { } d.Skip(*buf) } -func (r BinaryReader) postingsOffset(name string, values ...string) ([]index.Range, error) { +func (r *BinaryReader) postingsOffset(name string, values ...string) ([]index.Range, error) { rngs := make([]index.Range, 0, len(values)) if r.indexVersion == index.FormatV1 { e, ok := r.postingsV1[name] @@ -801,7 +812,16 @@ func (r BinaryReader) postingsOffset(name string, values ...string) ([]index.Ran return rngs, nil } -func (r BinaryReader) LookupSymbol(o uint32) (string, error) { +func (r *BinaryReader) LookupSymbol(o uint32) (string, error) { + cacheIndex := o % valueSymbolsCacheSize + r.valueSymbolsMx.Lock() + if cached := r.valueSymbols[cacheIndex]; cached.index == o && cached.symbol != "" { + v := cached.symbol + r.valueSymbolsMx.Unlock() + return v, nil + } + r.valueSymbolsMx.Unlock() + if s, ok := r.nameSymbols[o]; ok { return s, nil } @@ -812,10 +832,20 @@ func (r BinaryReader) LookupSymbol(o uint32) (string, error) { o += headerLen - index.HeaderLen } - return r.symbols.Lookup(o) + s, err := r.symbols.Lookup(o) + if err != nil { + return s, err + } + + r.valueSymbolsMx.Lock() + r.valueSymbols[cacheIndex].index = o + r.valueSymbols[cacheIndex].symbol = s + r.valueSymbolsMx.Unlock() + + return s, nil } -func (r BinaryReader) LabelValues(name string) ([]string, error) { +func (r *BinaryReader) LabelValues(name string) ([]string, error) { if r.indexVersion == index.FormatV1 { e, ok := r.postingsV1[name] if !ok { @@ -871,7 +901,7 @@ func yoloString(b []byte) string { return *((*string)(unsafe.Pointer(&b))) } -func (r BinaryReader) LabelNames() ([]string, error) { +func (r *BinaryReader) LabelNames() ([]string, error) { allPostingsKeyName, _ := index.AllPostingsKey() labelNames := make([]string, 0, len(r.postings)) for name := range r.postings { diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go index e9b9dc20bdc..d4b9dee03b6 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/lazy_binary_reader.go @@ -23,6 +23,11 @@ import ( "github.com/thanos-io/thanos/pkg/objstore" ) +var ( + errNotIdle = errors.New("the reader is not idle") + errUnloadedWhileLoading = errors.New("the index-header has been concurrently unloaded") +) + // LazyBinaryReaderMetrics holds metrics tracked by LazyBinaryReader. type LazyBinaryReaderMetrics struct { loadCount prometheus.Counter @@ -133,7 +138,8 @@ func (r *LazyBinaryReader) Close() error { defer r.onClosed(r) } - return r.unload() + // Unload without checking if idle. + return r.unloadIfIdleSince(0) } // IndexVersion implements Reader. @@ -203,7 +209,7 @@ func (r *LazyBinaryReader) LabelNames() ([]string, error) { // load ensures the underlying binary index-header reader has been successfully loaded. Returns // an error on failure. This function MUST be called with the read lock already acquired. -func (r *LazyBinaryReader) load() error { +func (r *LazyBinaryReader) load() (returnErr error) { // Nothing to do if we already tried loading it. 
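The valueSymbols addition above is a fixed-size, direct-mapped cache: the symbol offset modulo the table size picks a slot, and a hit requires the stored offset to match exactly, so collisions simply overwrite. A minimal sketch of that lookup path with the mutex elided; symbolCache and its names are stand-ins, not the BinaryReader API:

    package main

    import "fmt"

    const cacheSize = 1024

    type symbolCache struct {
        slots [cacheSize]struct {
            index  uint32
            symbol string
        }
    }

    func (c *symbolCache) lookup(o uint32, slow func(uint32) string) string {
        slot := o % cacheSize
        if cached := c.slots[slot]; cached.index == o && cached.symbol != "" {
            return cached.symbol // hit: no decode needed
        }
        s := slow(o) // miss: fall back to the mmap'd symbol table
        c.slots[slot].index = o
        c.slots[slot].symbol = s
        return s
    }

    func main() {
        calls := 0
        slow := func(o uint32) string { calls++; return fmt.Sprintf("sym-%d", o) }
        c := &symbolCache{}
        c.lookup(7, slow)
        c.lookup(7, slow) // served from cache
        fmt.Println(calls) // 1
    }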
if r.reader != nil { return nil @@ -216,8 +222,16 @@ func (r *LazyBinaryReader) load() error { // the read lock once done. r.readerMx.RUnlock() r.readerMx.Lock() - defer r.readerMx.RLock() - defer r.readerMx.Unlock() + defer func() { + r.readerMx.Unlock() + r.readerMx.RLock() + + // Between the write unlock and the subsequent read lock, the unload() may have run, + // so we make sure to catch this edge case. + if returnErr == nil && r.reader == nil { + returnErr = errUnloadedWhileLoading + } + }() // Ensure none else tried to load it in the meanwhile. if r.reader != nil { @@ -245,19 +259,22 @@ func (r *LazyBinaryReader) load() error { return nil } -// unload closes underlying BinaryReader. Calling this function on a already unloaded reader is a no-op. -func (r *LazyBinaryReader) unload() error { - // Always update the used timestamp so that the pool will not call unload() again until the next - // idle timeout is hit. - r.usedAt.Store(time.Now().UnixNano()) - +// unloadIfIdleSince closes underlying BinaryReader if the reader is idle since given time (as unix nano). If idleSince is 0, +// the check on the last usage is skipped. Calling this function on a already unloaded reader is a no-op. +func (r *LazyBinaryReader) unloadIfIdleSince(ts int64) error { r.readerMx.Lock() defer r.readerMx.Unlock() + // Nothing to do if already unloaded. if r.reader == nil { return nil } + // Do not unloadIfIdleSince if not idle. + if ts > 0 && r.usedAt.Load() > ts { + return errNotIdle + } + r.metrics.unloadCount.Inc() if err := r.reader.Close(); err != nil { r.metrics.unloadFailedCount.Inc() @@ -268,6 +285,16 @@ func (r *LazyBinaryReader) unload() error { return nil } -func (r *LazyBinaryReader) lastUsedAt() int64 { - return r.usedAt.Load() +// isIdleSince returns true if the reader is idle since given time (as unix nano). +func (r *LazyBinaryReader) isIdleSince(ts int64) bool { + if r.usedAt.Load() > ts { + return false + } + + // A reader can be considered idle only if it's loaded. + r.readerMx.RLock() + loaded := r.reader != nil + r.readerMx.RUnlock() + + return loaded } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go index 660ae4853a3..93f1fd88b37 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/indexheader/reader_pool.go @@ -11,6 +11,7 @@ import ( "github.com/go-kit/kit/log" "github.com/go-kit/kit/log/level" "github.com/oklog/ulid" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/thanos-io/thanos/pkg/objstore" @@ -98,29 +99,22 @@ func (p *ReaderPool) Close() { } func (p *ReaderPool) closeIdleReaders() { - for _, r := range p.getIdleReaders() { - // Closing an already closed reader is a no-op, so we close it and just update - // the last timestamp on success. If it will be still be idle the next time this - // function is called, we'll try to close it again and will just be a no-op. - // - // Due to concurrency, the current implementation may close a reader which was - // use between when the list of idle readers has been computed and now. This is - // an edge case we're willing to accept, to not further complicate the logic. 
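The unloadIfIdleSince refactor above (and its use in the pool below) makes the idle check and the unload a single atomic decision: the reader closes only when its last-use timestamp is at or before the cutoff, and returns errNotIdle otherwise so the caller can tell "busy" apart from real failures; a cutoff of 0 forces the unload, which is the Close path. A minimal sketch under those assumptions, using plain atomics in place of the real reader:

    package main

    import (
        "errors"
        "fmt"
        "sync/atomic"
        "time"
    )

    var errNotIdle = errors.New("the reader is not idle")

    type lazyReader struct {
        usedAt int64 // unix nanos, updated atomically on every use
        loaded int32 // 1 if the underlying reader is open
    }

    func (r *lazyReader) use() { atomic.StoreInt64(&r.usedAt, time.Now().UnixNano()) }

    func (r *lazyReader) unloadIfIdleSince(ts int64) error {
        if atomic.LoadInt32(&r.loaded) == 0 {
            return nil // already unloaded: no-op
        }
        if ts > 0 && atomic.LoadInt64(&r.usedAt) > ts {
            return errNotIdle // used after the cutoff: keep it loaded
        }
        atomic.StoreInt32(&r.loaded, 0)
        return nil
    }

    func main() {
        r := &lazyReader{loaded: 1}
        r.use()
        cutoff := time.Now().Add(-time.Minute).UnixNano()
        fmt.Println(r.unloadIfIdleSince(cutoff)) // the reader is not idle
        fmt.Println(r.unloadIfIdleSince(0))      // <nil>: ts==0 forces unload (Close path)
    }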
- if err := r.unload(); err != nil { + idleTimeoutAgo := time.Now().Add(-p.lazyReaderIdleTimeout).UnixNano() + + for _, r := range p.getIdleReadersSince(idleTimeoutAgo) { + if err := r.unloadIfIdleSince(idleTimeoutAgo); err != nil && !errors.Is(err, errNotIdle) { level.Warn(p.logger).Log("msg", "failed to close idle index-header reader", "err", err) } } } -func (p *ReaderPool) getIdleReaders() []*LazyBinaryReader { +func (p *ReaderPool) getIdleReadersSince(ts int64) []*LazyBinaryReader { p.lazyReadersMx.Lock() defer p.lazyReadersMx.Unlock() var idle []*LazyBinaryReader - threshold := time.Now().Add(-p.lazyReaderIdleTimeout).UnixNano() - for r := range p.lazyReaders { - if r.lastUsedAt() < threshold { + if r.isIdleSince(ts) { idle = append(idle, r) } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go index 1046057039c..e27478deda8 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/compact.go @@ -450,7 +450,7 @@ func (cg *Group) Resolution() int64 { // Planner returns blocks to compact. type Planner interface { - // Plan returns a block directories of blocks that should be compacted into single one. + // Plan returns a list of blocks that should be compacted into a single one. // The blocks can be overlapping. The provided metadata has to be ordered by minTime. Plan(ctx context.Context, metasByMinTime []*metadata.Meta) ([]*metadata.Meta, error) } diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go index 909252aee9d..8d271b3ee6f 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go @@ -513,7 +513,7 @@ func downsampleAggrBatch(chks []*AggrChunk, buf *[]sample, resolution int64) (ch return chk, err } - // Handle counters by reading them properly. + // Handle counters by applying resets directly. acs := make([]chunkenc.Iterator, 0, len(chks)) for _, achk := range chks { c, err := achk.Get(AggrCounter) @@ -580,6 +580,7 @@ type sample struct { // It handles overlapped chunks (removes overlaps). // NOTE: It is important to deduplicate with care ensuring that you don't hit // issue https://github.com/thanos-io/thanos/issues/2401#issuecomment-621958839. +// NOTE(bwplotka): This hides resets from PromQL engine. This means it will not work for PromQL resets function. type ApplyCounterResetsSeriesIterator struct { chks []chunkenc.Iterator i int // Current chunk. diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/godns/resolver.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/godns/resolver.go new file mode 100644 index 00000000000..a03bf87c94d --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/godns/resolver.go @@ -0,0 +1,25 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package godns + +import ( + "net" + + "github.com/pkg/errors" +) + +// Resolver is a wrapper for net.Resolver. +type Resolver struct { + *net.Resolver +} + +// IsNotFound checks if a DNS record is not found. 
+func (r *Resolver) IsNotFound(err error) bool { + if err == nil { + return false + } + err = errors.Cause(err) + dnsErr, ok := err.(*net.DNSError) + return ok && dnsErr.IsNotFound +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go index b9b95ce9088..f2fb3769c4d 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/lookup.go @@ -11,6 +11,8 @@ import ( "github.com/pkg/errors" ) +var ErrNoSuchHost = errors.New("no such host") + // Copied and slightly adjusted from Prometheus DNS SD: // https://github.com/prometheus/prometheus/blob/be3c082539d85908ce03b6d280f83343e7c930eb/discovery/dns/dns.go#L212 @@ -68,7 +70,7 @@ func (r *Resolver) lookupWithSearchPath(name string, qtype dns.Type) (*dns.Msg, if len(errs) == 0 { // Outcome 2: everyone says NXDOMAIN. - return &dns.Msg{}, nil + return &dns.Msg{}, ErrNoSuchHost } // Outcome 3: boned. return nil, errors.Errorf("could not resolve %q: all servers responded with errors to at least one search domain. Errs %s", name, fmtErrs(errs)) diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/resolver.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/resolver.go index e62660f12c8..0348967c2ef 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/resolver.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns/resolver.go @@ -72,3 +72,7 @@ func (r *Resolver) LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, } return resp, nil } + +func (r *Resolver) IsNotFound(err error) bool { + return errors.Is(errors.Cause(err), ErrNoSuchHost) +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go index 2d11e1cf918..060d54c57dd 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/provider.go @@ -14,6 +14,7 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/thanos-io/thanos/pkg/discovery/dns/godns" "github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns" "github.com/thanos-io/thanos/pkg/errutil" "github.com/thanos-io/thanos/pkg/extprom" @@ -43,12 +44,12 @@ func (t ResolverType) ToResolver(logger log.Logger) ipLookupResolver { var r ipLookupResolver switch t { case GolangResolverType: - r = net.DefaultResolver + r = &godns.Resolver{Resolver: net.DefaultResolver} case MiekgdnsResolverType: r = &miekgdns.Resolver{ResolvConf: miekgdns.DefaultResolvConfPath} default: level.Warn(logger).Log("msg", "no such resolver type, defaulting to golang", "type", t) - r = net.DefaultResolver + r = &godns.Resolver{Resolver: net.DefaultResolver} } return r } @@ -108,7 +109,7 @@ func GetQTypeName(addr string) (qtype string, name string) { // Resolve stores a list of provided addresses or their DNS records if requested. // Addresses prefixed with `dns+` or `dnssrv+` will be resolved through respective DNS lookup (A/AAAA or SRV). -// defaultPort is used for non-SRV records when a port is not supplied. +// For non-SRV records, it will return an error if a port is not supplied. 
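The IsNotFound methods above (and the ipLookupResolver interface extension below) push the "record not found" decision behind the resolver, so the Go resolver and miekgdns behave identically: NXDOMAIN yields an empty answer instead of an error. A minimal sketch under those assumptions, with a stand-in resolver interface rather than the real one:

    package main

    import (
        "errors"
        "fmt"
    )

    var errNoSuchHost = errors.New("no such host")

    type resolver interface {
        lookup(host string) ([]string, error)
        IsNotFound(err error) bool
    }

    type fakeResolver struct{}

    func (fakeResolver) lookup(host string) ([]string, error) { return nil, errNoSuchHost }
    func (fakeResolver) IsNotFound(err error) bool            { return errors.Is(err, errNoSuchHost) }

    func resolve(r resolver, host string) ([]string, error) {
        ips, err := r.lookup(host)
        if err != nil {
            if !r.IsNotFound(err) {
                return nil, fmt.Errorf("lookup IP addresses %q: %w", host, err)
            }
            // NXDOMAIN: continue with an empty answer, as in the hunks above.
        }
        return ips, nil
    }

    func main() {
        ips, err := resolve(fakeResolver{}, "missing.example")
        fmt.Println(len(ips), err) // 0 <nil>
    }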
func (p *Provider) Resolve(ctx context.Context, addrs []string) error { resolvedAddrs := map[string][]string{} errs := errutil.MultiError{} diff --git a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go index 679834f7b22..7f8108ce00c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go +++ b/vendor/github.com/thanos-io/thanos/pkg/discovery/dns/resolver.go @@ -37,6 +37,7 @@ type Resolver interface { type ipLookupResolver interface { LookupIPAddr(ctx context.Context, host string) ([]net.IPAddr, error) LookupSRV(ctx context.Context, service, proto, name string) (cname string, addrs []*net.SRV, err error) + IsNotFound(err error) bool } type dnsSD struct { @@ -78,7 +79,7 @@ func (s *dnsSD) Resolve(ctx context.Context, name string, qtype QType) ([]string // We exclude error from std Golang resolver for the case of the domain (e.g `NXDOMAIN`) not being found by DNS // server. Since `miekg` does not consider this as an error, when the host cannot be found, empty slice will be // returned. - if dnsErr, ok := err.(*net.DNSError); !ok || !dnsErr.IsNotFound { + if !s.resolver.IsNotFound(err) { return nil, errors.Wrapf(err, "lookup IP addresses %q", host) } if ips == nil { @@ -91,7 +92,12 @@ func (s *dnsSD) Resolve(ctx context.Context, name string, qtype QType) ([]string case SRV, SRVNoA: _, recs, err := s.resolver.LookupSRV(ctx, "", "", host) if err != nil { - return nil, errors.Wrapf(err, "lookup SRV records %q", host) + if !s.resolver.IsNotFound(err) { + return nil, errors.Wrapf(err, "lookup SRV records %q", host) + } + if len(recs) == 0 { + level.Error(s.logger).Log("msg", "failed to lookup SRV records", "host", host, "err", err) + } } for _, rec := range recs { @@ -108,7 +114,12 @@ func (s *dnsSD) Resolve(ctx context.Context, name string, qtype QType) ([]string // Do A lookup for the domain in SRV answer. resIPs, err := s.resolver.LookupIPAddr(ctx, rec.Target) if err != nil { - return nil, errors.Wrapf(err, "look IP addresses %q", rec.Target) + if !s.resolver.IsNotFound(err) { + return nil, errors.Wrapf(err, "lookup IP addresses %q", host) + } + if len(resIPs) == 0 { + level.Error(s.logger).Log("msg", "failed to lookup IP addresses", "host", host, "err", err) + } } for _, resIP := range resIPs { res = append(res, appendScheme(scheme, net.JoinHostPort(resIP.String(), resPort))) diff --git a/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go.go b/vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go similarity index 100% rename from vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go.go rename to vendor/github.com/thanos-io/thanos/pkg/errutil/multierror.go diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go index eb679679805..edbe49d4249 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go +++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/s3/s3.go @@ -51,6 +51,11 @@ var DefaultConfig = Config{ HTTPConfig: HTTPConfig{ IdleConnTimeout: model.Duration(90 * time.Second), ResponseHeaderTimeout: model.Duration(2 * time.Minute), + TLSHandshakeTimeout: model.Duration(10 * time.Second), + ExpectContinueTimeout: model.Duration(1 * time.Second), + MaxIdleConns: 100, + MaxIdleConnsPerHost: 100, + MaxConnsPerHost: 0, }, // Minimum file size after which an HTTP multipart request should be used to upload objects to storage. // Set to 128 MiB as in the minio client. 
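The S3 hunks here surface previously hard-coded http.Transport knobs (TLS handshake timeout, expect-continue timeout, connection pool sizes) as configurable fields; the HTTPConfig additions and the DefaultTransport wiring follow just below. A sketch of the shape of that wiring, with a simplified config struct standing in for the package's Config/HTTPConfig types:

    package main

    import (
        "fmt"
        "net/http"
        "time"
    )

    type httpConfig struct {
        IdleConnTimeout       time.Duration
        TLSHandshakeTimeout   time.Duration
        ExpectContinueTimeout time.Duration
        MaxIdleConns          int
        MaxIdleConnsPerHost   int
        MaxConnsPerHost       int // 0 means unlimited
    }

    func newTransport(c httpConfig) *http.Transport {
        return &http.Transport{
            IdleConnTimeout:       c.IdleConnTimeout,
            TLSHandshakeTimeout:   c.TLSHandshakeTimeout,
            ExpectContinueTimeout: c.ExpectContinueTimeout,
            MaxIdleConns:          c.MaxIdleConns,
            MaxIdleConnsPerHost:   c.MaxIdleConnsPerHost,
            MaxConnsPerHost:       c.MaxConnsPerHost,
        }
    }

    func main() {
        // Defaults matching the hunk above.
        t := newTransport(httpConfig{
            IdleConnTimeout:       90 * time.Second,
            TLSHandshakeTimeout:   10 * time.Second,
            ExpectContinueTimeout: 1 * time.Second,
            MaxIdleConns:          100,
            MaxIdleConnsPerHost:   100,
        })
        fmt.Println(t.MaxIdleConnsPerHost) // 100
    }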
@@ -94,6 +99,12 @@ type HTTPConfig struct { ResponseHeaderTimeout model.Duration `yaml:"response_header_timeout"` InsecureSkipVerify bool `yaml:"insecure_skip_verify"` + TLSHandshakeTimeout model.Duration `yaml:"tls_handshake_timeout"` + ExpectContinueTimeout model.Duration `yaml:"expect_continue_timeout"` + MaxIdleConns int `yaml:"max_idle_conns"` + MaxIdleConnsPerHost int `yaml:"max_idle_conns_per_host"` + MaxConnsPerHost int `yaml:"max_conns_per_host"` + // Allow upstream callers to inject a round tripper Transport http.RoundTripper `yaml:"-"` } @@ -111,11 +122,12 @@ func DefaultTransport(config Config) *http.Transport { DualStack: true, }).DialContext, - MaxIdleConns: 100, - MaxIdleConnsPerHost: 100, + MaxIdleConns: config.HTTPConfig.MaxIdleConns, + MaxIdleConnsPerHost: config.HTTPConfig.MaxIdleConnsPerHost, IdleConnTimeout: time.Duration(config.HTTPConfig.IdleConnTimeout), - TLSHandshakeTimeout: 10 * time.Second, - ExpectContinueTimeout: 1 * time.Second, + MaxConnsPerHost: config.HTTPConfig.MaxConnsPerHost, + TLSHandshakeTimeout: time.Duration(config.HTTPConfig.TLSHandshakeTimeout), + ExpectContinueTimeout: time.Duration(config.HTTPConfig.ExpectContinueTimeout), // A custom ResponseHeaderTimeout was introduced // to cover cases where the tcp connection works but // the server never answers. Defaults to 2 minutes. diff --git a/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go b/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go index 6214579c683..fbc832ed7bc 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go +++ b/vendor/github.com/thanos-io/thanos/pkg/objstore/swift/swift.go @@ -9,69 +9,182 @@ import ( "fmt" "io" "os" + "strconv" "strings" "testing" + "time" "github.com/go-kit/kit/log" - "github.com/gophercloud/gophercloud" - "github.com/gophercloud/gophercloud/openstack" - "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" - "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" - "github.com/gophercloud/gophercloud/pagination" + "github.com/go-kit/kit/log/level" + "github.com/ncw/swift" "github.com/pkg/errors" + "github.com/prometheus/common/model" + "github.com/thanos-io/thanos/pkg/objstore" + "github.com/thanos-io/thanos/pkg/runutil" "gopkg.in/yaml.v2" +) - "github.com/thanos-io/thanos/pkg/objstore" +const ( + // DirDelim is the delimiter used to model a directory structure in an object store bucket. + DirDelim = '/' + // Name of the directory in bucket, where to store file parts of SLO and DLO. + SegmentsDir = "segments/" ) -// DirDelim is the delimiter used to model a directory structure in an object store bucket. -const DirDelim = "/" +var DefaultConfig = Config{ + AuthVersion: 0, // Means autodetect of the auth API version by the library. 
+ ChunkSize: 1024 * 1024 * 1024, + Retries: 3, + ConnectTimeout: model.Duration(10 * time.Second), + Timeout: model.Duration(5 * time.Minute), +} + +type Config struct { + AuthVersion int `yaml:"auth_version"` + AuthUrl string `yaml:"auth_url"` + Username string `yaml:"username"` + UserDomainName string `yaml:"user_domain_name"` + UserDomainID string `yaml:"user_domain_id"` + UserId string `yaml:"user_id"` + Password string `yaml:"password"` + DomainId string `yaml:"domain_id"` + DomainName string `yaml:"domain_name"` + ProjectID string `yaml:"project_id"` + ProjectName string `yaml:"project_name"` + ProjectDomainID string `yaml:"project_domain_id"` + ProjectDomainName string `yaml:"project_domain_name"` + RegionName string `yaml:"region_name"` + ContainerName string `yaml:"container_name"` + ChunkSize int64 `yaml:"large_object_chunk_size"` + SegmentContainerName string `yaml:"large_object_segments_container_name"` + Retries int `yaml:"retries"` + ConnectTimeout model.Duration `yaml:"connect_timeout"` + Timeout model.Duration `yaml:"timeout"` + UseDynamicLargeObjects bool `yaml:"use_dynamic_large_objects"` +} + +func parseConfig(conf []byte) (*Config, error) { + sc := DefaultConfig + err := yaml.UnmarshalStrict(conf, &sc) + return &sc, err +} + +func configFromEnv() (*Config, error) { + c := swift.Connection{} + if err := c.ApplyEnvironment(); err != nil { + return nil, err + } + + config := Config{ + AuthVersion: c.AuthVersion, + AuthUrl: c.AuthUrl, + Password: c.ApiKey, + Username: c.UserName, + UserId: c.UserId, + DomainId: c.DomainId, + DomainName: c.Domain, + ProjectID: c.TenantId, + ProjectName: c.Tenant, + ProjectDomainID: c.TenantDomainId, + ProjectDomainName: c.TenantDomain, + RegionName: c.Region, + ContainerName: os.Getenv("OS_CONTAINER_NAME"), + ChunkSize: DefaultConfig.ChunkSize, + SegmentContainerName: os.Getenv("SWIFT_SEGMENTS_CONTAINER_NAME"), + Retries: c.Retries, + ConnectTimeout: model.Duration(c.ConnectTimeout), + Timeout: model.Duration(c.Timeout), + UseDynamicLargeObjects: false, + } + if os.Getenv("SWIFT_CHUNK_SIZE") != "" { + var err error + config.ChunkSize, err = strconv.ParseInt(os.Getenv("SWIFT_CHUNK_SIZE"), 10, 64) + if err != nil { + return nil, errors.Wrap(err, "parsing chunk size") + } + } + if strings.ToLower(os.Getenv("SWIFT_USE_DYNAMIC_LARGE_OBJECTS")) == "true" { + config.UseDynamicLargeObjects = true + } + return &config, nil +} -type SwiftConfig struct { - AuthUrl string `yaml:"auth_url"` - Username string `yaml:"username"` - UserDomainName string `yaml:"user_domain_name"` - UserDomainID string `yaml:"user_domain_id"` - UserId string `yaml:"user_id"` - Password string `yaml:"password"` - DomainId string `yaml:"domain_id"` - DomainName string `yaml:"domain_name"` - ProjectID string `yaml:"project_id"` - ProjectName string `yaml:"project_name"` - ProjectDomainID string `yaml:"project_domain_id"` - ProjectDomainName string `yaml:"project_domain_name"` - RegionName string `yaml:"region_name"` - ContainerName string `yaml:"container_name"` +func connectionFromConfig(sc *Config) *swift.Connection { + connection := swift.Connection{ + Domain: sc.DomainName, + DomainId: sc.DomainId, + UserName: sc.Username, + UserId: sc.UserId, + ApiKey: sc.Password, + AuthUrl: sc.AuthUrl, + Retries: sc.Retries, + Region: sc.RegionName, + AuthVersion: sc.AuthVersion, + Tenant: sc.ProjectName, + TenantId: sc.ProjectID, + TenantDomain: sc.ProjectDomainName, + TenantDomainId: sc.ProjectDomainID, + ConnectTimeout: time.Duration(sc.ConnectTimeout), + Timeout: 
time.Duration(sc.Timeout), + } + return &connection } type Container struct { - logger log.Logger - client *gophercloud.ServiceClient - name string + logger log.Logger + name string + connection *swift.Connection + chunkSize int64 + useDynamicLargeObjects bool + segmentsContainer string } func NewContainer(logger log.Logger, conf []byte) (*Container, error) { sc, err := parseConfig(conf) if err != nil { - return nil, err + return nil, errors.Wrap(err, "parse config") } + return NewContainerFromConfig(logger, sc, false) +} - provider, err := openstack.AuthenticatedClient(authOptsFromConfig(sc)) - if err != nil { - return nil, err +func ensureContainer(connection *swift.Connection, name string, createIfNotExist bool) error { + if _, _, err := connection.Container(name); err != nil { + if err != swift.ContainerNotFound { + return errors.Wrapf(err, "verify container %s", name) + } + if !createIfNotExist { + return fmt.Errorf("unable to find the expected container %s", name) + } + if err = connection.ContainerCreate(name, swift.Headers{}); err != nil { + return errors.Wrapf(err, "create container %s", name) + } + return nil } + return nil +} - client, err := openstack.NewObjectStorageV1(provider, gophercloud.EndpointOpts{ - Region: sc.RegionName, - }) - if err != nil { +func NewContainerFromConfig(logger log.Logger, sc *Config, createContainer bool) (*Container, error) { + connection := connectionFromConfig(sc) + if err := connection.Authenticate(); err != nil { + return nil, errors.Wrap(err, "authentication") + } + + if err := ensureContainer(connection, sc.ContainerName, createContainer); err != nil { + return nil, err + } + if sc.SegmentContainerName == "" { + sc.SegmentContainerName = sc.ContainerName + } else if err := ensureContainer(connection, sc.SegmentContainerName, createContainer); err != nil { return nil, err } return &Container{ - logger: logger, - client: client, - name: sc.ContainerName, + logger: logger, + name: sc.ContainerName, + connection: connection, + chunkSize: sc.ChunkSize, + useDynamicLargeObjects: sc.UseDynamicLargeObjects, + segmentsContainer: sc.SegmentContainerName, }, nil } @@ -82,215 +195,138 @@ func (c *Container) Name() string { // Iter calls f for each entry in the given directory. The argument to f is the full // object name including the prefix of the inspected directory. -func (c *Container) Iter(ctx context.Context, dir string, f func(string) error) error { - // Ensure the object name actually ends with a dir suffix. Otherwise we'll just iterate the - // object itself as one prefix item. 
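ensureContainer above distinguishes three cases: the container exists, it is missing and may be created, or it is missing and that is an error. A dependency-free sketch of the same decision, with errNotFound standing in for swift.ContainerNotFound and conn for the swift connection:

    package main

    import (
        "errors"
        "fmt"
    )

    var errNotFound = errors.New("container not found")

    type conn struct{ containers map[string]bool }

    func (c *conn) stat(name string) error {
        if !c.containers[name] {
            return errNotFound
        }
        return nil
    }

    func (c *conn) create(name string) { c.containers[name] = true }

    func ensureContainer(c *conn, name string, createIfNotExist bool) error {
        if err := c.stat(name); err != nil {
            if !errors.Is(err, errNotFound) {
                return fmt.Errorf("verify container %s: %w", name, err)
            }
            if !createIfNotExist {
                return fmt.Errorf("unable to find the expected container %s", name)
            }
            c.create(name)
        }
        return nil
    }

    func main() {
        c := &conn{containers: map[string]bool{}}
        fmt.Println(ensureContainer(c, "blocks", false)) // error: not found
        fmt.Println(ensureContainer(c, "blocks", true))  // <nil>: created
    }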
+func (c *Container) Iter(_ context.Context, dir string, f func(string) error) error { if dir != "" { - dir = strings.TrimSuffix(dir, DirDelim) + DirDelim + dir = strings.TrimSuffix(dir, string(DirDelim)) + string(DirDelim) } - - options := &objects.ListOpts{Full: true, Prefix: dir, Delimiter: DirDelim} - return objects.List(c.client, c.name, options).EachPage(func(page pagination.Page) (bool, error) { - objectNames, err := objects.ExtractNames(page) + return c.connection.ObjectsWalk(c.name, &swift.ObjectsOpts{Prefix: dir, Delimiter: DirDelim}, func(opts *swift.ObjectsOpts) (interface{}, error) { + objects, err := c.connection.ObjectNames(c.name, opts) if err != nil { - return false, err + return objects, errors.Wrap(err, "list object names") } - for _, objectName := range objectNames { - if err := f(objectName); err != nil { - return false, err + for _, object := range objects { + if object == SegmentsDir { + continue + } + if err := f(object); err != nil { + return objects, errors.Wrap(err, "iteration over objects") } } - - return true, nil + return objects, nil }) } -// Get returns a reader for the given object name. -func (c *Container) Get(ctx context.Context, name string) (io.ReadCloser, error) { +func (c *Container) get(name string, headers swift.Headers, checkHash bool) (io.ReadCloser, error) { if name == "" { - return nil, errors.New("error, empty container name passed") + return nil, errors.New("object name cannot be empty") + } + file, _, err := c.connection.ObjectOpen(c.name, name, checkHash, headers) + if err != nil { + return nil, errors.Wrap(err, "open object") } - response := objects.Download(c.client, c.name, name, nil) - return response.Body, response.Err + return file, err } -// GetRange returns a new range reader for the given object name and range. -func (c *Container) GetRange(ctx context.Context, name string, off, length int64) (io.ReadCloser, error) { - lowerLimit := "" - upperLimit := "" - if off >= 0 { - lowerLimit = fmt.Sprintf("%d", off) - } - if length > 0 { - upperLimit = fmt.Sprintf("%d", off+length-1) - } - options := objects.DownloadOpts{ - Newest: true, - Range: fmt.Sprintf("bytes=%s-%s", lowerLimit, upperLimit), +// Get returns a reader for the given object name. +func (c *Container) Get(_ context.Context, name string) (io.ReadCloser, error) { + return c.get(name, swift.Headers{}, true) +} + +func (c *Container) GetRange(_ context.Context, name string, off, length int64) (io.ReadCloser, error) { + // Set Range HTTP header, see the docs https://docs.openstack.org/api-ref/object-store/?expanded=show-container-details-and-list-objects-detail,get-object-content-and-metadata-detail#id76. + bytesRange := fmt.Sprintf("bytes=%d-", off) + if length != -1 { + bytesRange = fmt.Sprintf("%s%d", bytesRange, off+length-1) } - response := objects.Download(c.client, c.name, name, options) - return response.Body, response.Err + return c.get(name, swift.Headers{"Range": bytesRange}, false) } // Attributes returns information about the specified object. 
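The GetRange rewrite above translates an (offset, length) pair into an HTTP Range header: "bytes=off-" for an open-ended read (length == -1), otherwise "bytes=off-(off+length-1)", since HTTP byte ranges are end-inclusive. A small sketch of exactly that arithmetic:

    package main

    import "fmt"

    func rangeHeader(off, length int64) string {
        r := fmt.Sprintf("bytes=%d-", off)
        if length != -1 {
            r = fmt.Sprintf("%s%d", r, off+length-1)
        }
        return r
    }

    func main() {
        fmt.Println(rangeHeader(0, 100))  // bytes=0-99
        fmt.Println(rangeHeader(512, -1)) // bytes=512-
    }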
-func (c *Container) Attributes(ctx context.Context, name string) (objstore.ObjectAttributes, error) { - response := objects.Get(c.client, c.name, name, nil) - headers, err := response.Extract() +func (c *Container) Attributes(_ context.Context, name string) (objstore.ObjectAttributes, error) { + if name == "" { + return objstore.ObjectAttributes{}, errors.New("object name cannot be empty") + } + info, _, err := c.connection.Object(c.name, name) if err != nil { - return objstore.ObjectAttributes{}, err + return objstore.ObjectAttributes{}, errors.Wrap(err, "get object attributes") } - return objstore.ObjectAttributes{ - Size: headers.ContentLength, - LastModified: headers.LastModified, + Size: info.Bytes, + LastModified: info.LastModified, }, nil } // Exists checks if the given object exists. -func (c *Container) Exists(ctx context.Context, name string) (bool, error) { - err := objects.Get(c.client, c.name, name, nil).Err - if err == nil { - return true, nil +func (c *Container) Exists(_ context.Context, name string) (bool, error) { + found := true + _, _, err := c.connection.Object(c.name, name) + if c.IsObjNotFoundErr(err) { + err = nil + found = false } - - if _, ok := err.(gophercloud.ErrDefault404); ok { - return false, nil - } - - return false, err + return found, err } // IsObjNotFoundErr returns true if error means that object is not found. Relevant to Get operations. func (c *Container) IsObjNotFoundErr(err error) bool { - _, ok := err.(gophercloud.ErrDefault404) - return ok + return errors.Is(err, swift.ObjectNotFound) } // Upload writes the contents of the reader as an object into the container. -func (c *Container) Upload(ctx context.Context, name string, r io.Reader) error { - options := &objects.CreateOpts{Content: r} - res := objects.Create(c.client, c.name, name, options) - return res.Err -} - -// Delete removes the object with the given name. -func (c *Container) Delete(ctx context.Context, name string) error { - return objects.Delete(c.client, c.name, name, nil).Err -} - -func (*Container) Close() error { - // Nothing to close. - return nil -} - -func parseConfig(conf []byte) (*SwiftConfig, error) { - var sc SwiftConfig - err := yaml.UnmarshalStrict(conf, &sc) - return &sc, err -} - -func authOptsFromConfig(sc *SwiftConfig) gophercloud.AuthOptions { - authOpts := gophercloud.AuthOptions{ - IdentityEndpoint: sc.AuthUrl, - Username: sc.Username, - UserID: sc.UserId, - Password: sc.Password, - DomainID: sc.DomainId, - DomainName: sc.DomainName, - TenantID: sc.ProjectID, - TenantName: sc.ProjectName, - - // Allow Gophercloud to re-authenticate automatically. - AllowReauth: true, - } - - // Support for cross-domain scoping (user in different domain than project). - // If a userDomainName or userDomainID is given, the user is scoped to this domain. - switch { - case sc.UserDomainName != "": - authOpts.DomainName = sc.UserDomainName - case sc.UserDomainID != "": - authOpts.DomainID = sc.UserDomainID +func (c *Container) Upload(_ context.Context, name string, r io.Reader) error { + size, err := objstore.TryToGetSize(r) + if err != nil { + level.Warn(c.logger).Log("msg", "could not guess file size, using large object to avoid issues if the file is larger than limit", "name", name, "err", err) + // Anything higher or equal to chunk size so the SLO is used. + size = c.chunkSize } - - // A token can be scoped to a domain or project. - // The project can be in another domain than the user, which is indicated by setting either projectDomainName or projectDomainID. 
- switch { - case sc.ProjectDomainName != "": - authOpts.Scope = &gophercloud.AuthScope{ - DomainName: sc.ProjectDomainName, + var file io.WriteCloser + if size >= c.chunkSize { + opts := swift.LargeObjectOpts{ + Container: c.name, + ObjectName: name, + ChunkSize: c.chunkSize, + SegmentContainer: c.segmentsContainer, + CheckHash: true, } - case sc.ProjectDomainID != "": - authOpts.Scope = &gophercloud.AuthScope{ - DomainID: sc.ProjectDomainID, + if c.useDynamicLargeObjects { + if file, err = c.connection.DynamicLargeObjectCreateFile(&opts); err != nil { + return errors.Wrap(err, "create DLO file") + } + } else { + if file, err = c.connection.StaticLargeObjectCreateFile(&opts); err != nil { + return errors.Wrap(err, "create SLO file") + } } - } - if authOpts.Scope != nil { - switch { - case sc.ProjectName != "": - authOpts.Scope.ProjectName = sc.ProjectName - case sc.ProjectID != "": - authOpts.Scope.ProjectID = sc.ProjectID + } else { + if file, err = c.connection.ObjectCreate(c.name, name, true, "", "", swift.Headers{}); err != nil { + return errors.Wrap(err, "create file") } } - return authOpts -} - -func (c *Container) createContainer(name string) error { - return containers.Create(c.client, name, nil).Err -} - -func (c *Container) deleteContainer(name string) error { - return containers.Delete(c.client, name).Err -} - -func configFromEnv() SwiftConfig { - c := SwiftConfig{ - AuthUrl: os.Getenv("OS_AUTH_URL"), - Username: os.Getenv("OS_USERNAME"), - Password: os.Getenv("OS_PASSWORD"), - RegionName: os.Getenv("OS_REGION_NAME"), - ContainerName: os.Getenv("OS_CONTAINER_NAME"), - ProjectID: os.Getenv("OS_PROJECT_ID"), - ProjectName: os.Getenv("OS_PROJECT_NAME"), - UserDomainID: os.Getenv("OS_USER_DOMAIN_ID"), - UserDomainName: os.Getenv("OS_USER_DOMAIN_NAME"), - ProjectDomainID: os.Getenv("OS_PROJECT_DOMAIN_ID"), - ProjectDomainName: os.Getenv("OS_PROJECT_DOMAIN_NAME"), + defer runutil.CloseWithLogOnErr(c.logger, file, "upload object close") + if _, err := io.Copy(file, r); err != nil { + return errors.Wrap(err, "uploading object") } + return nil +} - return c +// Delete removes the object with the given name. +func (c *Container) Delete(_ context.Context, name string) error { + return errors.Wrap(c.connection.LargeObjectDelete(c.name, name), "delete object") } -// validateForTests checks to see the config options for tests are set. -func validateForTests(conf SwiftConfig) error { - if conf.AuthUrl == "" || - conf.Username == "" || - conf.Password == "" || - (conf.ProjectName == "" && conf.ProjectID == "") || - conf.RegionName == "" { - return errors.New("insufficient swift test configuration information") - } +func (*Container) Close() error { + // Nothing to close. return nil } // NewTestContainer creates test objStore client that before returning creates temporary container. // In a close function it empties and deletes the container. func NewTestContainer(t testing.TB) (objstore.Bucket, func(), error) { - config := configFromEnv() - if err := validateForTests(config); err != nil { - return nil, nil, err - } - containerConfig, err := yaml.Marshal(config) + config, err := configFromEnv() if err != nil { - return nil, nil, err + return nil, nil, errors.Wrap(err, "loading config from ENV") } - - c, err := NewContainer(log.NewNopLogger(), containerConfig) - if err != nil { - return nil, nil, err - } - if config.ContainerName != "" { if os.Getenv("THANOS_ALLOW_EXISTING_BUCKET_USE") == "" { return nil, nil, errors.New("OS_CONTAINER_NAME is defined. 
Normally this tests will create temporary container " + @@ -299,30 +335,33 @@ func NewTestContainer(t testing.TB) (objstore.Bucket, func(), error) { "needs to be manually cleared. This means that it is only useful to run one test in a time. This is due " + "to safety (accidentally pointing prod container for test) as well as swift not being fully strong consistent.") } - + c, err := NewContainerFromConfig(log.NewNopLogger(), config, false) + if err != nil { + return nil, nil, errors.Wrap(err, "initializing new container") + } if err := c.Iter(context.Background(), "", func(f string) error { - return errors.Errorf("container %s is not empty", config.ContainerName) + return errors.Errorf("container %s is not empty", c.Name()) }); err != nil { - return nil, nil, errors.Wrapf(err, "swift check container %s", config.ContainerName) + return nil, nil, errors.Wrapf(err, "check container %s", c.Name()) } - - t.Log("WARNING. Reusing", config.ContainerName, "container for Swift tests. Manual cleanup afterwards is required") + t.Log("WARNING. Reusing", c.Name(), "container for Swift tests. Manual cleanup afterwards is required") return c, func() {}, nil } - - tmpContainerName := objstore.CreateTemporaryTestBucketName(t) - - if err := c.createContainer(tmpContainerName); err != nil { - return nil, nil, err + config.ContainerName = objstore.CreateTemporaryTestBucketName(t) + config.SegmentContainerName = config.ContainerName + c, err := NewContainerFromConfig(log.NewNopLogger(), config, true) + if err != nil { + return nil, nil, errors.Wrap(err, "initializing new container") } - - c.name = tmpContainerName - t.Log("created temporary container for swift tests with name", tmpContainerName) + t.Log("created temporary container for swift tests with name", c.Name()) return c, func() { objstore.EmptyBucket(t, context.Background(), c) - if err := c.deleteContainer(tmpContainerName); err != nil { - t.Logf("deleting container %s failed: %s", tmpContainerName, err) + if err := c.connection.ContainerDelete(c.name); err != nil { + t.Logf("deleting container %s failed: %s", c.Name(), err) + } + if err := c.connection.ContainerDelete(c.segmentsContainer); err != nil { + t.Logf("deleting segments container %s failed: %s", c.segmentsContainer, err) } }, nil } diff --git a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go index c20e5b162b2..4a4cc525579 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go +++ b/vendor/github.com/thanos-io/thanos/pkg/promclient/promclient.go @@ -152,7 +152,7 @@ func IsWALDirAccessible(dir string) error { return nil } -// ExternalLabels returns external labels from /api/v1/status/config Prometheus endpoint. +// ExternalLabels returns sorted external labels from /api/v1/status/config Prometheus endpoint. // Note that configuration can be hot reloadable on Prometheus, so this config might change in runtime. 
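The swift Upload path above picks the write strategy by size: objects at or above the configured chunk size go through a segmented large-object writer (static or dynamic), everything else is a plain object create, and an unknown size is conservatively treated as large. A simplified sketch of that decision only (uploadPath is an illustrative name, not the package's API):

    package main

    import "fmt"

    const chunkSize = 1024 * 1024 * 1024 // 1 GiB, as in DefaultConfig above

    func uploadPath(size int64, sizeKnown, useDynamic bool) string {
        if !sizeKnown {
            size = chunkSize // assume large to stay safe
        }
        if size >= chunkSize {
            if useDynamic {
                return "DLO" // dynamic large object
            }
            return "SLO" // static large object
        }
        return "plain"
    }

    func main() {
        fmt.Println(uploadPath(4096, true, false)) // plain
        fmt.Println(uploadPath(0, false, false))   // SLO
        fmt.Println(uploadPath(5<<30, true, true)) // DLO
    }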
func (c *Client) ExternalLabels(ctx context.Context, base *url.URL) (labels.Labels, error) { u := *base @@ -181,7 +181,10 @@ func (c *Client) ExternalLabels(ctx context.Context, base *url.URL) (labels.Labe if err := yaml.Unmarshal([]byte(d.Data.YAML), &cfg); err != nil { return nil, errors.Wrapf(err, "parse Prometheus config: %v", d.Data.YAML) } - return labels.FromMap(cfg.Global.ExternalLabels), nil + + lset := labels.FromMap(cfg.Global.ExternalLabels) + sort.Sort(lset) + return lset, nil } type Flags struct { @@ -650,12 +653,12 @@ func (c *Client) get2xxResultWithGRPCErrors(ctx context.Context, spanName string // SeriesInGRPC returns the labels from Prometheus series API. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus. -func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []storepb.LabelMatcher, startTime, endTime int64) ([]map[string]string, error) { +func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []*labels.Matcher, startTime, endTime int64) ([]map[string]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/series") q := u.Query() - q.Add("match[]", storepb.MatchersToString(matchers...)) + q.Add("match[]", storepb.PromMatchersToString(matchers...)) q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) u.RawQuery = q.Encode() @@ -669,11 +672,14 @@ func (c *Client) SeriesInGRPC(ctx context.Context, base *url.URL, matchers []sto // LabelNames returns all known label names. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus. -func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, startTime, endTime int64) ([]string, error) { +func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, matchers []storepb.LabelMatcher, startTime, endTime int64) ([]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/labels") q := u.Query() + if len(matchers) > 0 { + q.Add("match[]", storepb.MatchersToString(matchers...)) + } q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) u.RawQuery = q.Encode() @@ -686,11 +692,14 @@ func (c *Client) LabelNamesInGRPC(ctx context.Context, base *url.URL, startTime, // LabelValuesInGRPC returns all known label values for a given label name. It uses gRPC errors. // NOTE: This method is tested in pkg/store/prometheus_test.go against Prometheus. 
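LabelNamesInGRPC above (and LabelValuesInGRPC below) now forward matchers to Prometheus as match[] query parameters on /api/v1/labels and /api/v1/label/<name>/values, added only when matchers are present. A sketch of the query construction using only net/url; the base URL and selector strings are placeholders:

    package main

    import (
        "fmt"
        "net/url"
    )

    func labelNamesURL(base string, matchers []string, start, end string) (string, error) {
        u, err := url.Parse(base)
        if err != nil {
            return "", err
        }
        u.Path = u.Path + "/api/v1/labels"
        q := u.Query()
        for _, m := range matchers {
            q.Add("match[]", m) // only added when matchers are present
        }
        q.Add("start", start)
        q.Add("end", end)
        u.RawQuery = q.Encode()
        return u.String(), nil
    }

    func main() {
        s, _ := labelNamesURL("http://prom:9090", []string{`{job="api"}`}, "1", "2")
        fmt.Println(s)
    }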
-func (c *Client) LabelValuesInGRPC(ctx context.Context, base *url.URL, label string, startTime, endTime int64) ([]string, error) { +func (c *Client) LabelValuesInGRPC(ctx context.Context, base *url.URL, label string, matchers []storepb.LabelMatcher, startTime, endTime int64) ([]string, error) { u := *base u.Path = path.Join(u.Path, "/api/v1/label/", label, "/values") q := u.Query() + if len(matchers) > 0 { + q.Add("match[]", storepb.MatchersToString(matchers...)) + } q.Add("start", formatTime(timestamp.Time(startTime))) q.Add("end", formatTime(timestamp.Time(endTime))) u.RawQuery = q.Encode() diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go index 1d0195c529f..3754e6264ad 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/bucket.go @@ -104,7 +104,7 @@ type bucketStoreMetrics struct { seriesMergeDuration prometheus.Histogram resultSeriesCount prometheus.Summary chunkSizeBytes prometheus.Histogram - queriesDropped prometheus.Counter + queriesDropped *prometheus.CounterVec seriesRefetches prometheus.Counter cachedPostingsCompressions *prometheus.CounterVec @@ -186,10 +186,10 @@ func newBucketStoreMetrics(reg prometheus.Registerer) *bucketStoreMetrics { }, }) - m.queriesDropped = promauto.With(reg).NewCounter(prometheus.CounterOpts{ + m.queriesDropped = promauto.With(reg).NewCounterVec(prometheus.CounterOpts{ Name: "thanos_bucket_store_queries_dropped_total", - Help: "Number of queries that were dropped due to the sample limit.", - }) + Help: "Number of queries that were dropped due to the limit.", + }, []string{"reason"}) m.seriesRefetches = promauto.With(reg).NewCounter(prometheus.CounterOpts{ Name: "thanos_bucket_store_series_refetches_total", Help: fmt.Sprintf("Total number of cases where %v bytes was not enough was to fetch series from index, resulting in refetch.", maxSeriesSize), @@ -276,6 +276,8 @@ type BucketStore struct { // chunksLimiterFactory creates a new limiter used to limit the number of chunks fetched by each Series() call. chunksLimiterFactory ChunksLimiterFactory + // seriesLimiterFactory creates a new limiter used to limit the number of touched series by each Series() call. 
+ seriesLimiterFactory SeriesLimiterFactory partitioner partitioner filterConfig *FilterConfig @@ -300,6 +302,7 @@ func NewBucketStore( queryGate gate.Gate, maxChunkPoolBytes uint64, chunksLimiterFactory ChunksLimiterFactory, + seriesLimiterFactory SeriesLimiterFactory, debugLogging bool, blockSyncConcurrency int, filterConfig *FilterConfig, @@ -333,6 +336,7 @@ func NewBucketStore( filterConfig: filterConfig, queryGate: queryGate, chunksLimiterFactory: chunksLimiterFactory, + seriesLimiterFactory: seriesLimiterFactory, partitioner: gapBasedPartitioner{maxGapSize: partitionerMaxGapSize}, enableCompatibilityLabel: enableCompatibilityLabel, postingOffsetsInMemSampling: postingOffsetsInMemSampling, @@ -677,12 +681,13 @@ func (s *bucketSeriesSet) Err() error { } func blockSeries( - extLset map[string]string, + extLset labels.Labels, indexr *bucketIndexReader, chunkr *bucketChunkReader, matchers []*labels.Matcher, req *storepb.SeriesRequest, chunksLimiter ChunksLimiter, + seriesLimiter SeriesLimiter, ) (storepb.SeriesSet, *queryStats, error) { ps, err := indexr.ExpandedPostings(matchers) if err != nil { @@ -693,6 +698,11 @@ func blockSeries( return storepb.EmptySeriesSet(), indexr.stats, nil } + // Reserve series seriesLimiter + if err := seriesLimiter.Reserve(uint64(len(ps))); err != nil { + return nil, nil, errors.Wrap(err, "exceeded series limit") + } + // Preload all series index data. // TODO(bwplotka): Consider not keeping all series in memory all the time. // TODO(bwplotka): Do lazy loading in one step as `ExpandingPostings` method. @@ -703,51 +713,48 @@ func blockSeries( // Transform all series into the response types and mark their relevant chunks // for preloading. var ( - res []seriesEntry - lset labels.Labels - chks []chunks.Meta + res []seriesEntry + symbolizedLset []symbolizedLabel + lset labels.Labels + chks []chunks.Meta ) for _, id := range ps { - if err := indexr.LoadedSeries(id, &lset, &chks, req); err != nil { + ok, err := indexr.LoadSeriesForTime(id, &symbolizedLset, &chks, req.SkipChunks, req.MinTime, req.MaxTime) + if err != nil { return nil, nil, errors.Wrap(err, "read series") } - if len(chks) > 0 { - s := seriesEntry{lset: make(labels.Labels, 0, len(lset)+len(extLset))} - if !req.SkipChunks { - s.refs = make([]uint64, 0, len(chks)) - s.chks = make([]storepb.AggrChunk, 0, len(chks)) - for _, meta := range chks { - if err := chunkr.addPreload(meta.Ref); err != nil { - return nil, nil, errors.Wrap(err, "add chunk preload") - } - s.chks = append(s.chks, storepb.AggrChunk{ - MinTime: meta.MinTime, - MaxTime: meta.MaxTime, - }) - s.refs = append(s.refs, meta.Ref) - } + if !ok { + // No matching chunks for this time duration, skip series. + continue + } - // Reserve chunksLimiter if we save chunks. - if err := chunksLimiter.Reserve(uint64(len(s.chks))); err != nil { - return nil, nil, errors.Wrap(err, "exceeded chunks limit") + s := seriesEntry{} + if !req.SkipChunks { + // Schedule loading chunks. + s.refs = make([]uint64, 0, len(chks)) + s.chks = make([]storepb.AggrChunk, 0, len(chks)) + for _, meta := range chks { + if err := chunkr.addPreload(meta.Ref); err != nil { + return nil, nil, errors.Wrap(err, "add chunk preload") } + s.chks = append(s.chks, storepb.AggrChunk{ + MinTime: meta.MinTime, + MaxTime: meta.MaxTime, + }) + s.refs = append(s.refs, meta.Ref) } - for _, l := range lset { - // Skip if the external labels of the block overrule the series' label. - // NOTE(fabxc): maybe move it to a prefixed version to still ensure uniqueness of series? 
- if extLset[l.Name] != "" { - continue - } - s.lset = append(s.lset, l) - } - for ln, lv := range extLset { - s.lset = append(s.lset, labels.Label{Name: ln, Value: lv}) + // Ensure sample limit through chunksLimiter if we return chunks. + if err := chunksLimiter.Reserve(uint64(len(s.chks))); err != nil { + return nil, nil, errors.Wrap(err, "exceeded chunks limit") } - sort.Sort(s.lset) - - res = append(res, s) } + if err := indexr.LookupLabelsSymbols(symbolizedLset, &lset); err != nil { + return nil, nil, errors.Wrap(err, "Lookup labels symbols") + } + + s.lset = labelpb.ExtendSortedLabels(lset, extLset) + res = append(res, s) } if req.SkipChunks { @@ -771,7 +778,6 @@ func blockSeries( } } } - return newBucketSeriesSet(res), indexr.stats.merge(chunkr.stats), nil } @@ -871,7 +877,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie defer s.queryGate.Done() } - matchers, err := storepb.TranslateFromPromMatchers(req.Matchers...) + matchers, err := storepb.MatchersToPromMatchers(req.Matchers...) if err != nil { return status.Error(codes.InvalidArgument, err.Error()) } @@ -886,7 +892,8 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie g, gctx = errgroup.WithContext(ctx) resHints = &hintspb.SeriesResponseHints{} reqBlockMatchers []*labels.Matcher - chunksLimiter = s.chunksLimiterFactory(s.metrics.queriesDropped) + chunksLimiter = s.chunksLimiterFactory(s.metrics.queriesDropped.WithLabelValues("chunks")) + seriesLimiter = s.seriesLimiterFactory(s.metrics.queriesDropped.WithLabelValues("series")) ) if req.Hints != nil { @@ -895,7 +902,7 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie return status.Error(codes.InvalidArgument, errors.Wrap(err, "unmarshal series request hints").Error()) } - reqBlockMatchers, err = storepb.TranslateFromPromMatchers(reqHints.BlockMatchers...) + reqBlockMatchers, err = storepb.MatchersToPromMatchers(reqHints.BlockMatchers...) if err != nil { return status.Error(codes.InvalidArgument, errors.Wrap(err, "translate request hints labels matchers").Error()) } @@ -936,12 +943,13 @@ func (s *BucketStore) Series(req *storepb.SeriesRequest, srv storepb.Store_Serie g.Go(func() error { part, pstats, err := blockSeries( - b.meta.Thanos.Labels, + b.extLset, indexr, chunkr, blockMatchers, req, chunksLimiter, + seriesLimiter, ) if err != nil { return errors.Wrapf(err, "fetch series for block %s", b.meta.ULID) @@ -1080,7 +1088,7 @@ func (s *BucketStore) LabelNames(ctx context.Context, req *storepb.LabelNamesReq return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "unmarshal label names request hints").Error()) } - reqBlockMatchers, err = storepb.TranslateFromPromMatchers(reqHints.BlockMatchers...) + reqBlockMatchers, err = storepb.MatchersToPromMatchers(reqHints.BlockMatchers...) if err != nil { return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "translate request hints labels matchers").Error()) } @@ -1164,7 +1172,7 @@ func (s *BucketStore) LabelValues(ctx context.Context, req *storepb.LabelValuesR return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "unmarshal label values request hints").Error()) } - reqBlockMatchers, err = storepb.TranslateFromPromMatchers(reqHints.BlockMatchers...) + reqBlockMatchers, err = storepb.MatchersToPromMatchers(reqHints.BlockMatchers...) 
if err != nil { return nil, status.Error(codes.InvalidArgument, errors.Wrap(err, "translate request hints labels matchers").Error()) } @@ -1367,6 +1375,7 @@ type bucketBlock struct { dir string indexCache storecache.IndexCache chunkPool pool.BytesPool + extLset labels.Labels indexHeaderReader indexheader.Reader @@ -1403,14 +1412,15 @@ func newBucketBlock( partitioner: p, meta: meta, indexHeaderReader: indexHeadReader, - } - - // Translate the block's labels and inject the block ID as a label - // to allow to match blocks also by ID. - b.relabelLabels = append(labels.FromMap(meta.Thanos.Labels), labels.Label{ - Name: block.BlockIDLabel, - Value: meta.ULID.String(), - }) + extLset: labels.FromMap(meta.Thanos.Labels), + // Translate the block's labels and inject the block ID as a label + // to allow to match blocks also by ID. + relabelLabels: append(labels.FromMap(meta.Thanos.Labels), labels.Label{ + Name: block.BlockIDLabel, + Value: meta.ULID.String(), + }), + } + sort.Sort(b.extLset) sort.Sort(b.relabelLabels) // Get object handles for all chunk files (segment files) from meta.json, if available. @@ -1456,15 +1466,15 @@ func (b *bucketBlock) readIndexRange(ctx context.Context, off, length int64) ([] } func (b *bucketBlock) readChunkRange(ctx context.Context, seq int, off, length int64) (*[]byte, error) { + if seq < 0 || seq >= len(b.chunkObjs) { + return nil, errors.Errorf("unknown segment file for index %d", seq) + } + c, err := b.chunkPool.Get(int(length)) if err != nil { return nil, errors.Wrap(err, "allocate chunk bytes") } - if seq < 0 || seq >= len(b.chunkObjs) { - return nil, errors.Errorf("unknown segment file for index %d", seq) - } - buf := bytes.NewBuffer(*c) r, err := b.bkt.GetRange(ctx, b.chunkObjs[seq], off, length) @@ -2070,20 +2080,25 @@ func (g gapBasedPartitioner) Partition(length int, rng func(int) (uint64, uint64 return parts } -// LoadedSeries populates the given labels and chunk metas for the series identified -// by the reference. -// Returns ErrNotFound if the ref does not resolve to a known series. -func (r *bucketIndexReader) LoadedSeries(ref uint64, lset *labels.Labels, chks *[]chunks.Meta, - req *storepb.SeriesRequest) error { +type symbolizedLabel struct { + name, value uint32 +} + +// LoadSeriesForTime populates the given symbolized labels for the series identified by the reference if at least one chunk is within +// time selection. +// LoadSeriesForTime also populates chunk metas slices if skipChunks is set to false. Chunks are also limited by the given time selection. +// LoadSeriesForTime returns false when there is no series data for the given time range. +// +// Error is returned on decoding error or if the reference does not resolve to a known series. +func (r *bucketIndexReader) LoadSeriesForTime(ref uint64, lset *[]symbolizedLabel, chks *[]chunks.Meta, skipChunks bool, mint, maxt int64) (ok bool, err error) { b, ok := r.loadedSeries[ref] if !ok { - return errors.Errorf("series %d not found", ref) + return false, errors.Errorf("series %d not found", ref) } r.stats.seriesTouched++ r.stats.seriesTouchedSizeSum += len(b) - - return r.decodeSeriesWithReq(b, lset, chks, req) + return decodeSeriesForTime(b, lset, chks, skipChunks, mint, maxt) } // Close releases the underlying resources of the reader. @@ -2092,93 +2107,79 @@ func (r *bucketIndexReader) Close() error { return nil } -// decodeSeriesWithReq decodes a series entry from the given byte slice based on the SeriesRequest.
-func (r *bucketIndexReader) decodeSeriesWithReq(b []byte, lbls *labels.Labels, chks *[]chunks.Meta, - req *storepb.SeriesRequest) error { +// LookupLabelsSymbols populates label set strings from the symbolized label set. +func (r *bucketIndexReader) LookupLabelsSymbols(symbolized []symbolizedLabel, lbls *labels.Labels) error { *lbls = (*lbls)[:0] - *chks = (*chks)[:0] - - d := encoding.Decbuf{B: b} - - k := d.Uvarint() - - for i := 0; i < k; i++ { - lno := uint32(d.Uvarint()) - lvo := uint32(d.Uvarint()) - - if d.Err() != nil { - return errors.Wrap(d.Err(), "read series label offsets") - } - - ln, err := r.dec.LookupSymbol(lno) + for _, s := range symbolized { + ln, err := r.dec.LookupSymbol(s.name) if err != nil { return errors.Wrap(err, "lookup label name") } - lv, err := r.dec.LookupSymbol(lvo) + lv, err := r.dec.LookupSymbol(s.value) if err != nil { return errors.Wrap(err, "lookup label value") } - *lbls = append(*lbls, labels.Label{Name: ln, Value: lv}) } + return nil +} +// decodeSeriesForTime decodes a series entry from the given byte slice decoding only chunk metas that are within given min and max time. +// If skipChunks is specified decodeSeriesForTime does not return any chunks, but only labels, and only if at least a single chunk is within the time range. +// decodeSeriesForTime returns false when there is no series data for the given time range. +func decodeSeriesForTime(b []byte, lset *[]symbolizedLabel, chks *[]chunks.Meta, skipChunks bool, selectMint, selectMaxt int64) (ok bool, err error) { + *lset = (*lset)[:0] + *chks = (*chks)[:0] + + d := encoding.Decbuf{B: b} + + // Read labels without looking up symbols. + k := d.Uvarint() + for i := 0; i < k; i++ { + lno := uint32(d.Uvarint()) + lvo := uint32(d.Uvarint()) + *lset = append(*lset, symbolizedLabel{name: lno, value: lvo}) + } // Read the chunks meta data. k = d.Uvarint() - if k == 0 { - return nil + return false, d.Err() } - t0 := d.Varint64() - maxt := int64(d.Uvarint64()) + t0 - ref0 := int64(d.Uvarint64()) + // First t0 is absolute, rest is just diff so different type is used (Uvarint64). + mint := d.Varint64() + maxt := int64(d.Uvarint64()) + mint + // Similar for first ref. + ref := int64(d.Uvarint64()) - // No chunk in the required time range. - if t0 > req.MaxTime { - return nil - } - - if req.MinTime <= maxt { - *chks = append(*chks, chunks.Meta{ - Ref: uint64(ref0), - MinTime: t0, - MaxTime: maxt, - }) - // Get a valid chunk, return if it is a skip chunk request. - if req.SkipChunks { - return nil + for i := 0; i < k; i++ { + if i > 0 { + mint += int64(d.Uvarint64()) + maxt = int64(d.Uvarint64()) + mint + ref += d.Varint64() } - } - t0 = maxt - - for i := 1; i < k; i++ { - mint := int64(d.Uvarint64()) + t0 - maxt := int64(d.Uvarint64()) + mint - ref0 += d.Varint64() - t0 = maxt - if maxt < req.MinTime { - continue - } - if mint > req.MaxTime { + if mint > selectMaxt { break } - if d.Err() != nil { - return errors.Wrapf(d.Err(), "read meta for chunk %d", i) - } - - *chks = append(*chks, chunks.Meta{ - Ref: uint64(ref0), - MinTime: mint, - MaxTime: maxt, - }) + if maxt >= selectMint { + // Found a chunk. + if skipChunks { + // We are not interested in chunks and we know there is at least one, that's enough to return series.
+ return true, nil + } - if req.SkipChunks { - return nil + *chks = append(*chks, chunks.Meta{ + Ref: uint64(ref), + MinTime: mint, + MaxTime: maxt, + }) } + + mint = maxt } - return d.Err() + return len(*chks) > 0, d.Err() } type bucketChunkReader struct { diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/labelpb/label.go b/vendor/github.com/thanos-io/thanos/pkg/store/labelpb/label.go index 5638f69e5f1..5712cd912f8 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/labelpb/label.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/labelpb/label.go @@ -235,27 +235,35 @@ func (m *ZLabel) Compare(other ZLabel) int { return strings.Compare(m.Value, other.Value) } -// ExtendLabels extend given labels by extend in labels format. +// ExtendSortedLabels extends the given labels by extend in labels format. // The type conversion is done safely, which means we don't modify extend labels underlying array. // // In case of existing labels already present in given label set, it will be overwritten by external one. -func ExtendLabels(lset labels.Labels, extend labels.Labels) labels.Labels { - overwritten := map[string]struct{}{} - for i, l := range lset { - if v := extend.Get(l.Name); v != "" { - lset[i].Value = v - overwritten[l.Name] = struct{}{} - } - } +// NOTE: Labels and extend have to be sorted. +func ExtendSortedLabels(lset labels.Labels, extend labels.Labels) labels.Labels { + ret := make(labels.Labels, 0, len(lset)+len(extend)) - for _, l := range extend { - if _, ok := overwritten[l.Name]; ok { - continue + // Inject external labels in place. + for len(lset) > 0 && len(extend) > 0 { + d := strings.Compare(lset[0].Name, extend[0].Name) + if d == 0 { + // Duplicate, prefer external labels. + // NOTE(fabxc): Maybe move it to a prefixed version to still ensure uniqueness of series? + ret = append(ret, extend[0]) + lset, extend = lset[1:], extend[1:] + } else if d < 0 { + ret = append(ret, lset[0]) + lset = lset[1:] + } else if d > 0 { + ret = append(ret, extend[0]) + extend = extend[1:] } - lset = append(lset, l) } - sort.Sort(lset) - return lset + + // Append all remaining elements. + ret = append(ret, lset...) + ret = append(ret, extend...) + return ret } func PromLabelSetsToString(lsets []labels.Labels) string { @@ -295,3 +303,28 @@ func DeepCopy(lbls []ZLabel) []ZLabel { } return ret } + +// ZLabelSets is a sortable list of ZLabelSet. It assumes the label pairs in each ZLabelSet element are already sorted. +type ZLabelSets []ZLabelSet + +func (z ZLabelSets) Len() int { return len(z) } + +func (z ZLabelSets) Swap(i, j int) { z[i], z[j] = z[j], z[i] } + +func (z ZLabelSets) Less(i, j int) bool { + l := 0 + r := 0 + var result int + lenI, lenJ := len(z[i].Labels), len(z[j].Labels) + for l < lenI && r < lenJ { + result = z[i].Labels[l].Compare(z[j].Labels[r]) + if result == 0 { + l++ + r++ + continue + } + return result < 0 + } + + return l == lenI +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/limiter.go b/vendor/github.com/thanos-io/thanos/pkg/store/limiter.go index c60be901e92..266dbbf3b2a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/limiter.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/limiter.go @@ -18,10 +18,20 @@ type ChunksLimiter interface { Reserve(num uint64) error } +type SeriesLimiter interface { + // Reserve num series out of the total number of series enforced by the limiter. + // Returns an error if the limit has been exceeded. This function must be + // goroutine safe.
+ Reserve(num uint64) error +} + // ChunksLimiterFactory is used to create a new ChunksLimiter. The factory is useful for // projects depending on Thanos (eg. Cortex) which have dynamic limits. type ChunksLimiterFactory func(failedCounter prometheus.Counter) ChunksLimiter +// SeriesLimiterFactory is used to create a new SeriesLimiter. +type SeriesLimiterFactory func(failedCounter prometheus.Counter) SeriesLimiter + // Limiter is a simple mechanism for checking if something has passed a certain threshold. type Limiter struct { limit uint64 @@ -57,3 +67,10 @@ func NewChunksLimiterFactory(limit uint64) ChunksLimiterFactory { return NewLimiter(limit, failedCounter) } } + +// NewSeriesLimiterFactory makes a new SeriesLimiterFactory with a static limit. +func NewSeriesLimiterFactory(limit uint64) SeriesLimiterFactory { + return func(failedCounter prometheus.Counter) SeriesLimiter { + return NewLimiter(limit, failedCounter) + } +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/local.go b/vendor/github.com/thanos-io/thanos/pkg/store/local.go index 1eee0ea2cac..a7a7583bdff 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/local.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/local.go @@ -151,22 +151,17 @@ func (s *LocalStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.I // Series returns all series for a requested time range and label matcher. The returned data may // exceed the requested time bounds. func (s *LocalStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { - match, newMatchers, err := matchesExternalLabels(r.Matchers, s.extLabels) + match, matchers, err := matchesExternalLabels(r.Matchers, s.extLabels) if err != nil { return status.Error(codes.InvalidArgument, err.Error()) } if !match { return nil } - if len(newMatchers) == 0 { + if len(matchers) == 0 { return status.Error(codes.InvalidArgument, errors.New("no matchers specified (excluding external labels)").Error()) } - matchers, err := storepb.TranslateFromPromMatchers(newMatchers...) - if err != nil { - return status.Error(codes.InvalidArgument, err.Error()) - } - var chosen []int for si, series := range s.series { lbls := labelpb.ZLabelsToPromLabels(series.Labels) diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go index 0e4210084a0..7239c4b3adc 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/prometheus.go @@ -43,13 +43,13 @@ import ( // PrometheusStore implements the store node API on top of the Prometheus remote read API. type PrometheusStore struct { - logger log.Logger - base *url.URL - client *promclient.Client - buffers sync.Pool - component component.StoreAPI - externalLabels func() labels.Labels - timestamps func() (mint int64, maxt int64) + logger log.Logger + base *url.URL + client *promclient.Client + buffers sync.Pool + component component.StoreAPI + externalLabelsFn func() labels.Labels + timestamps func() (mint int64, maxt int64) remoteReadAcceptableResponses []prompb.ReadRequest_ResponseType @@ -60,14 +60,14 @@ const initialBufSize = 32 * 1024 // 32KB seems like a good minimum starting size // NewPrometheusStore returns a new PrometheusStore that uses the given HTTP client // to talk to Prometheus. -// It attaches the provided external labels to all results. +// It attaches the provided external labels to all results. Provided external labels have to be sorted.
func NewPrometheusStore( logger log.Logger, reg prometheus.Registerer, client *promclient.Client, baseURL *url.URL, component component.StoreAPI, - externalLabels func() labels.Labels, + externalLabelsFn func() labels.Labels, timestamps func() (mint int64, maxt int64), ) (*PrometheusStore, error) { if logger == nil { @@ -78,7 +78,7 @@ func NewPrometheusStore( base: baseURL, client: client, component: component, - externalLabels: externalLabels, + externalLabelsFn: externalLabelsFn, timestamps: timestamps, remoteReadAcceptableResponses: []prompb.ReadRequest_ResponseType{prompb.ReadRequest_STREAMED_XOR_CHUNKS, prompb.ReadRequest_SAMPLES}, buffers: sync.Pool{New: func() interface{} { @@ -100,7 +100,7 @@ func NewPrometheusStore( // NOTE(bwplotka): MaxTime & MinTime are not accurate nor adjusted dynamically. // This is fine for now, but might be needed in future. func (p *PrometheusStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.InfoResponse, error) { - lset := p.externalLabels() + lset := p.externalLabelsFn() mint, maxt := p.timestamps() res := &storepb.InfoResponse{ @@ -133,18 +133,16 @@ func (p *PrometheusStore) putBuffer(b *[]byte) { // Series returns all series for a requested time range and label matcher. func (p *PrometheusStore) Series(r *storepb.SeriesRequest, s storepb.Store_SeriesServer) error { - externalLabels := p.externalLabels() + extLset := p.externalLabelsFn() - match, newMatchers, err := matchesExternalLabels(r.Matchers, externalLabels) + match, matchers, err := matchesExternalLabels(r.Matchers, extLset) if err != nil { return status.Error(codes.InvalidArgument, err.Error()) } - if !match { return nil } - - if len(newMatchers) == 0 { + if len(matchers) == 0 { return status.Error(codes.InvalidArgument, "no matchers specified (excluding external labels)") } @@ -155,16 +153,16 @@ func (p *PrometheusStore) Series(r *storepb.SeriesRequest, s storepb.Store_Serie } if r.SkipChunks { - labelMaps, err := p.client.SeriesInGRPC(s.Context(), p.base, newMatchers, r.MinTime, r.MaxTime) + labelMaps, err := p.client.SeriesInGRPC(s.Context(), p.base, matchers, r.MinTime, r.MaxTime) if err != nil { return err } for _, lbm := range labelMaps { - lset := make([]labelpb.ZLabel, 0, len(lbm)+len(externalLabels)) + lset := make([]labelpb.ZLabel, 0, len(lbm)+len(extLset)) for k, v := range lbm { lset = append(lset, labelpb.ZLabel{Name: k, Value: v}) } - lset = append(lset, labelpb.ZLabelsFromPromLabels(externalLabels)...) + lset = append(lset, labelpb.ZLabelsFromPromLabels(extLset)...) sort.Slice(lset, func(i, j int) bool { return lset[i].Name < lset[j].Name }) @@ -176,18 +174,17 @@ func (p *PrometheusStore) Series(r *storepb.SeriesRequest, s storepb.Store_Serie } q := &prompb.Query{StartTimestampMs: r.MinTime, EndTimestampMs: r.MaxTime} - - for _, m := range newMatchers { + for _, m := range matchers { pm := &prompb.LabelMatcher{Name: m.Name, Value: m.Value} switch m.Type { - case storepb.LabelMatcher_EQ: + case labels.MatchEqual: pm.Type = prompb.LabelMatcher_EQ - case storepb.LabelMatcher_NEQ: + case labels.MatchNotEqual: pm.Type = prompb.LabelMatcher_NEQ - case storepb.LabelMatcher_RE: + case labels.MatchRegexp: pm.Type = prompb.LabelMatcher_RE - case storepb.LabelMatcher_NRE: + case labels.MatchNotRegexp: pm.Type = prompb.LabelMatcher_NRE default: return errors.New("unrecognized matcher type") @@ -207,16 +204,16 @@ func (p *PrometheusStore) Series(r *storepb.SeriesRequest, s storepb.Store_Serie // remote read. 
contentType := httpResp.Header.Get("Content-Type") if strings.HasPrefix(contentType, "application/x-protobuf") { - return p.handleSampledPrometheusResponse(s, httpResp, queryPrometheusSpan, externalLabels) + return p.handleSampledPrometheusResponse(s, httpResp, queryPrometheusSpan, extLset) } if !strings.HasPrefix(contentType, "application/x-streamed-protobuf; proto=prometheus.ChunkedReadResponse") { return errors.Errorf("not supported remote read content type: %s", contentType) } - return p.handleStreamedPrometheusResponse(s, httpResp, queryPrometheusSpan, externalLabels) + return p.handleStreamedPrometheusResponse(s, httpResp, queryPrometheusSpan, extLset) } -func (p *PrometheusStore) handleSampledPrometheusResponse(s storepb.Store_SeriesServer, httpResp *http.Response, querySpan opentracing.Span, externalLabels labels.Labels) error { +func (p *PrometheusStore) handleSampledPrometheusResponse(s storepb.Store_SeriesServer, httpResp *http.Response, querySpan opentracing.Span, extLset labels.Labels) error { ctx := s.Context() level.Debug(p.logger).Log("msg", "started handling ReadRequest_SAMPLED response type.") @@ -232,7 +229,7 @@ func (p *PrometheusStore) handleSampledPrometheusResponse(s storepb.Store_Series span.SetTag("series_count", len(resp.Results[0].Timeseries)) for _, e := range resp.Results[0].Timeseries { - lset := labelpb.ExtendLabels(labelpb.ZLabelsToPromLabels(e.Labels), externalLabels) + lset := labelpb.ExtendSortedLabels(labelpb.ZLabelsToPromLabels(e.Labels), extLset) if len(e.Samples) == 0 { // As found in https://github.com/thanos-io/thanos/issues/381 // Prometheus can give us completely empty time series. Ignore these with log until we figure out that @@ -262,7 +259,7 @@ func (p *PrometheusStore) handleSampledPrometheusResponse(s storepb.Store_Series return nil } -func (p *PrometheusStore) handleStreamedPrometheusResponse(s storepb.Store_SeriesServer, httpResp *http.Response, querySpan opentracing.Span, externalLabels labels.Labels) error { +func (p *PrometheusStore) handleStreamedPrometheusResponse(s storepb.Store_SeriesServer, httpResp *http.Response, querySpan opentracing.Span, extLset labels.Labels) error { level.Debug(p.logger).Log("msg", "started handling ReadRequest_STREAMED_XOR_CHUNKS streamed read response.") framesNum := 0 @@ -316,7 +313,7 @@ func (p *PrometheusStore) handleStreamedPrometheusResponse(s storepb.Store_Serie if err := s.Send(storepb.NewSeriesResponse(&storepb.Series{ Labels: labelpb.ZLabelsFromPromLabels( - labelpb.ExtendLabels(labelpb.ZLabelsToPromLabels(series.Labels), externalLabels), + labelpb.ExtendSortedLabels(labelpb.ZLabelsToPromLabels(series.Labels), extLset), ), Chunks: thanosChks, })); err != nil { @@ -377,8 +374,8 @@ func (p *PrometheusStore) chunkSamples(series *prompb.TimeSeries, maxSamplesPerC } chks = append(chks, storepb.AggrChunk{ - MinTime: int64(samples[0].Timestamp), - MaxTime: int64(samples[chunkSize-1].Timestamp), + MinTime: samples[0].Timestamp, + MaxTime: samples[chunkSize-1].Timestamp, Raw: &storepb.Chunk{Type: enc, Data: cb}, }) @@ -434,25 +431,26 @@ func (p *PrometheusStore) startPromRemoteRead(ctx context.Context, q *prompb.Que return presp, nil } -// matchesExternalLabels filters out external labels matching from matcher if exists as the local storage does not have them. -// It also returns false if given matchers are not matching external labels. 
-func matchesExternalLabels(ms []storepb.LabelMatcher, externalLabels labels.Labels) (bool, []storepb.LabelMatcher, error) { - if len(externalLabels) == 0 { - return true, ms, nil - } - - tms, err := storepb.TranslateFromPromMatchers(ms...) +// matchesExternalLabels returns false if the given matchers do not match the external labels. +// If true, matchesExternalLabels also returns the Prometheus matchers with those matching external labels filtered out. +func matchesExternalLabels(ms []storepb.LabelMatcher, externalLabels labels.Labels) (bool, []*labels.Matcher, error) { + tms, err := storepb.MatchersToPromMatchers(ms...) if err != nil { return false, nil, err } - var newMatcher []storepb.LabelMatcher + if len(externalLabels) == 0 { + return true, tms, nil + } + + var newMatchers []*labels.Matcher for i, tm := range tms { // Validate all matchers. extValue := externalLabels.Get(tm.Name) if extValue == "" { // Agnostic to external labels. - newMatcher = append(newMatcher, ms[i]) + tms = append(tms[:i], tms[i:]...) + newMatchers = append(newMatchers, tm) continue } @@ -462,8 +460,7 @@ func matchesExternalLabels(ms []storepb.LabelMatcher, externalLabels labels.Labe return false, nil, nil } } - - return true, newMatcher, nil + return true, newMatchers, nil } // encodeChunk translates the sample pairs into a chunk. @@ -483,7 +480,7 @@ func (p *PrometheusStore) encodeChunk(ss []prompb.Sample) (storepb.Chunk_Encodin // LabelNames returns all known label names. func (p *PrometheusStore) LabelNames(ctx context.Context, r *storepb.LabelNamesRequest) (*storepb.LabelNamesResponse, error) { - lbls, err := p.client.LabelNamesInGRPC(ctx, p.base, r.Start, r.End) + lbls, err := p.client.LabelNamesInGRPC(ctx, p.base, nil, r.Start, r.End) if err != nil { return nil, err } @@ -492,14 +489,14 @@ // LabelValues returns all known label values for a given label name. func (p *PrometheusStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequest) (*storepb.LabelValuesResponse, error) { - externalLset := p.externalLabels() + externalLset := p.externalLabelsFn() // First check for matching external label which has priority. if l := externalLset.Get(r.Label); l != "" { return &storepb.LabelValuesResponse{Values: []string{l}}, nil } - vals, err := p.client.LabelValuesInGRPC(ctx, p.base, r.Label, r.Start, r.End) + vals, err := p.client.LabelValuesInGRPC(ctx, p.base, r.Label, nil, r.Start, r.End) if err != nil { return nil, err } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go index 4bb9f2f1a53..4391a36619c 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/proxy.go @@ -145,7 +145,7 @@ func (s *ProxyStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.I labelSets := make(map[uint64]labelpb.ZLabelSet, len(stores)) for _, st := range stores { for _, lset := range st.LabelSets() { - mergedLabelSet := labelpb.ExtendLabels(lset, s.selectorLabels) + mergedLabelSet := labelpb.ExtendSortedLabels(lset, s.selectorLabels) labelSets[mergedLabelSet.Hash()] = labelpb.ZLabelSet{Labels: labelpb.ZLabelsFromPromLabels(mergedLabelSet)} } } @@ -188,24 +188,27 @@ func (s cancelableRespSender) send(r *storepb.SeriesResponse) { // Series returns all series for a requested time range and label matcher. Requested series are taken from other // stores and proxied to RPC client.
NOTE: The resulting data is not trimmed exactly to the min and max time range. func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { - match, newMatchers, err := matchesExternalLabels(r.Matchers, s.selectorLabels) + // TODO(bwplotka): This should be part of request logger, otherwise it does not make much sense. Also, could be + // triggered by tracing span to reduce cognitive load. + reqLogger := log.With(s.logger, "component", "proxy", "request", r.String()) + + match, matchers, err := matchesExternalLabels(r.Matchers, s.selectorLabels) if err != nil { return status.Error(codes.InvalidArgument, err.Error()) } if !match { return nil } - - if len(newMatchers) == 0 { - return status.Error(codes.InvalidArgument, errors.New("no matchers specified (excluding external labels)").Error()) + if len(matchers) == 0 { + return status.Error(codes.InvalidArgument, errors.New("no matchers specified (excluding selector labels)").Error()) } + storeMatchers, _ := storepb.PromMatchersToMatchers(matchers...) // Error would be returned by matchesExternalLabels, so skip check. g, gctx := errgroup.WithContext(srv.Context()) // Allow buffering of up to 10 series responses. // Each might be quite large (multi chunk long series given by sidecar). respSender, respCh := newCancelableRespChannel(gctx, 10) - g.Go(func() error { // This goroutine is responsible for calling store's Series concurrently. Merged results // are passed to respCh and sent concurrently to client (if the buffer of 10 has room). @@ -217,7 +220,7 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe r = &storepb.SeriesRequest{ MinTime: r.MinTime, MaxTime: r.MaxTime, - Matchers: newMatchers, + Matchers: storeMatchers, Aggregates: r.Aggregates, MaxResolutionWindow: r.MaxResolutionWindow, SkipChunks: r.SkipChunks, @@ -232,24 +235,12 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe }() for _, st := range s.stores() { - // We might be able to skip the store if its meta information indicates - // it cannot have series matching our query. - // NOTE: all matchers are validated in matchesExternalLabels method so we explicitly ignore error. - var ok bool - tracing.DoInSpan(gctx, "store_matches", func(ctx context.Context) { - var storeDebugMatcher [][]*labels.Matcher - if ctxVal := srv.Context().Value(StoreMatcherKey); ctxVal != nil { - if value, ok := ctxVal.([][]*labels.Matcher); ok { - storeDebugMatcher = value - } - } - // We can skip error, we already translated matchers once. - ok, _ = storeMatches(st, r.MinTime, r.MaxTime, storeDebugMatcher, r.Matchers...) - }) - if !ok { - storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("store %s filtered out", st)) + // We might be able to skip the store if its meta information indicates it cannot have series matching our query. + if ok, reason := storeMatches(gctx, st, r.MinTime, r.MaxTime, matchers...); !ok { + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("store %s filtered out: %v", st, reason)) continue } + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s queried", st)) // This is used to cancel this stream when one operation takes too long.
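The store filtering in the hunk above relies on storeMatches, whose full definition appears further down in this file's diff; it ORs the request matchers across each store's advertised external label sets, so a store is queried if at least one of its label sets is compatible with every matcher. A minimal, self-contained sketch of that semantic follows; the helper name anyLabelSetMatches is illustrative and not part of the vendored code:

package main

import (
	"fmt"

	"github.com/prometheus/prometheus/pkg/labels"
)

// anyLabelSetMatches mirrors the OR-between-label-sets semantic used by the
// store filtering above: a label set is compatible when every matcher either
// finds no value for its name (agnostic) or matches the value it finds.
func anyLabelSetMatches(matchers []*labels.Matcher, lsets ...labels.Labels) bool {
	if len(lsets) == 0 {
		// A store that advertises no external labels can never be ruled out.
		return true
	}
	for _, ls := range lsets {
		compatible := true
		for _, m := range matchers {
			if v := ls.Get(m.Name); v != "" && !m.Matches(v) {
				compatible = false
				break
			}
		}
		if compatible {
			return true
		}
	}
	return false
}

func main() {
	cluster := labels.MustNewMatcher(labels.MatchEqual, "cluster", "eu1")
	fmt.Println(anyLabelSetMatches(
		[]*labels.Matcher{cluster},
		labels.FromStrings("cluster", "eu1"), // compatible: the store is queried
		labels.FromStrings("cluster", "us1"), // incompatible on its own
	)) // prints: true
}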
@@ -267,7 +258,7 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe } err = errors.Wrapf(err, "fetch series for %s %s", storeID, st) if r.PartialResponseDisabled { - level.Error(s.logger).Log("err", err, "msg", "partial response disabled; aborting request") + level.Error(reqLogger).Log("err", err, "msg", "partial response disabled; aborting request") return err } respSender.send(storepb.NewWarnSeriesResponse(err)) @@ -276,15 +267,16 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe // Schedule streamSeriesSet that translates gRPC streamed response // into seriesSet (if series) or respCh if warnings. - seriesSet = append(seriesSet, startStreamSeriesSet(seriesCtx, s.logger, closeSeries, + seriesSet = append(seriesSet, startStreamSeriesSet(seriesCtx, reqLogger, closeSeries, wg, sc, respSender, st.String(), !r.PartialResponseDisabled, s.responseTimeout, s.metrics.emptyStreamResponses)) } - level.Debug(s.logger).Log("msg", strings.Join(storeDebugMsgs, ";")) + level.Debug(reqLogger).Log("msg", "Series: started fanout streams", "status", strings.Join(storeDebugMsgs, ";")) + if len(seriesSet) == 0 { // This indicates that the configured StoreAPIs are not the ones the end user expects. err := errors.New("No StoreAPIs matched for this query") - level.Warn(s.logger).Log("err", err, "stores", strings.Join(storeDebugMsgs, ";")) + level.Warn(reqLogger).Log("err", err, "stores", strings.Join(storeDebugMsgs, ";")) respSender.send(storepb.NewWarnSeriesResponse(err)) return nil } @@ -312,7 +304,7 @@ func (s *ProxyStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSe }) if err := g.Wait(); err != nil { // TODO(bwplotka): Replace with request logger. - level.Error(s.logger).Log("err", err) + level.Error(reqLogger).Log("err", err) return err } return nil @@ -483,44 +475,58 @@ func (s *streamSeriesSet) Err() error { return errors.Wrap(s.err, s.name) } -// matchStore returns true if the given store may hold data for the given label matchers. -func storeMatches(s Client, mint, maxt int64, storeDebugMatchers [][]*labels.Matcher, matchers ...storepb.LabelMatcher) (bool, error) { +// storeMatches returns whether the given store may hold data for the given label matchers, time range, and debug store matchers gathered from the context. +// It also produces a tracing span. +func storeMatches(ctx context.Context, s Client, mint, maxt int64, matchers ...*labels.Matcher) (ok bool, reason string) { + span, ctx := tracing.StartSpan(ctx, "store_matches") + defer span.Finish() + + var storeDebugMatcher [][]*labels.Matcher + if ctxVal := ctx.Value(StoreMatcherKey); ctxVal != nil { + if value, ok := ctxVal.([][]*labels.Matcher); ok { + storeDebugMatcher = value } + } + storeMinTime, storeMaxTime := s.TimeRange() - if mint > storeMaxTime || maxt <= storeMinTime { - return false, nil + if mint > storeMaxTime || maxt < storeMinTime { + return false, fmt.Sprintf("does not have data within this time period: [%v,%v]. Store time ranges: [%v,%v]", mint, maxt, storeMinTime, storeMaxTime) } - if !storeMatchDebugMetadata(s, storeDebugMatchers) { - return false, nil + if ok, reason := storeMatchDebugMetadata(s, storeDebugMatcher); !ok { + return false, reason } - promMatchers, err := storepb.TranslateFromPromMatchers(matchers...) - if err != nil { - return false, err + extLset := s.LabelSets() + if !labelSetsMatch(matchers, extLset...)
{ + return false, fmt.Sprintf("external labels %v does not match request label matchers: %v", extLset, matchers) } - return labelSetsMatch(promMatchers, s.LabelSets()...), nil + return true, "" } // storeMatchDebugMetadata returns true if the store's address matches the storeDebugMatchers. -func storeMatchDebugMetadata(s Client, storeDebugMatchers [][]*labels.Matcher) bool { +func storeMatchDebugMetadata(s Client, storeDebugMatchers [][]*labels.Matcher) (ok bool, reason string) { if len(storeDebugMatchers) == 0 { - return true + return true, "" } match := false for _, sm := range storeDebugMatchers { match = match || labelSetsMatch(sm, labels.FromStrings("__address__", s.Addr())) } - return match + if !match { + return false, fmt.Sprintf("__address__ %v does not match debug store metadata matchers: %v", s.Addr(), storeDebugMatchers) + } + return true, "" } // labelSetsMatch returns false if none of the label sets match the matchers (i.e., the label sets are OR-ed). -func labelSetsMatch(matchers []*labels.Matcher, lss ...labels.Labels) bool { - if len(lss) == 0 { +func labelSetsMatch(matchers []*labels.Matcher, lset ...labels.Labels) bool { + if len(lset) == 0 { return true } - for _, ls := range lss { + for _, ls := range lset { notMatched := false for _, m := range matchers { if lv := ls.Get(m.Name); lv != "" && !m.Matches(lv) { @@ -549,19 +555,10 @@ func (s *ProxyStore) LabelNames(ctx context.Context, r *storepb.LabelNamesReques for _, st := range s.stores() { st := st - var ok bool - tracing.DoInSpan(gctx, "store_matches", func(ctx context.Context) { - var storeDebugMatcher [][]*labels.Matcher - if ctxVal := ctx.Value(StoreMatcherKey); ctxVal != nil { - if value, ok := ctxVal.([][]*labels.Matcher); ok { - storeDebugMatcher = value - } - } - // We can skip error, we already translated matchers once. - ok, _ = storeMatches(st, r.Start, r.End, storeDebugMatcher) - }) - if !ok { - storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out", st)) + + // We might be able to skip the store if its meta information indicates it cannot have series matching our query. + if ok, reason := storeMatches(gctx, st, r.Start, r.End); !ok { + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out due to %v", st, reason)) continue } storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s queried", st)) @@ -617,33 +614,24 @@ func (s *ProxyStore) LabelValues(ctx context.Context, r *storepb.LabelValuesRequ ) for _, st := range s.stores() { - store := st - var ok bool - tracing.DoInSpan(gctx, "store_matches", func(ctx context.Context) { - var storeDebugMatcher [][]*labels.Matcher - if ctxVal := ctx.Value(StoreMatcherKey); ctxVal != nil { - if value, ok := ctxVal.([][]*labels.Matcher); ok { - storeDebugMatcher = value - } - } - // We can skip error, we already translated matchers once. - ok, _ = storeMatches(st, r.Start, r.End, storeDebugMatcher) - }) - if !ok { - storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out", st)) + st := st + + // We might be able to skip the store if its meta information indicates it cannot have series matching our query.
+ if ok, reason := storeMatches(gctx, st, r.Start, r.End); !ok { + storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s filtered out due to %v", st, reason)) continue } storeDebugMsgs = append(storeDebugMsgs, fmt.Sprintf("Store %s queried", st)) g.Go(func() error { - resp, err := store.LabelValues(gctx, &storepb.LabelValuesRequest{ + resp, err := st.LabelValues(gctx, &storepb.LabelValuesRequest{ Label: r.Label, PartialResponseDisabled: r.PartialResponseDisabled, Start: r.Start, End: r.End, }) if err != nil { - err = errors.Wrapf(err, "fetch label values from store %s", store) + err = errors.Wrapf(err, "fetch label values from store %s", st) if r.PartialResponseDisabled { return err } diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go index 41f90952722..63c3808135a 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/custom.go @@ -337,9 +337,9 @@ func (x *PartialResponseStrategy) MarshalJSON() ([]byte, error) { return []byte(strconv.Quote(x.String())), nil } -// TranslatePromMatchers returns proto matchers from Prometheus matchers. +// PromMatchersToMatchers returns proto matchers from Prometheus matchers. // NOTE: It allocates memory. -func TranslatePromMatchers(ms ...*labels.Matcher) ([]LabelMatcher, error) { +func PromMatchersToMatchers(ms ...*labels.Matcher) ([]LabelMatcher, error) { res := make([]LabelMatcher, 0, len(ms)) for _, m := range ms { var t LabelMatcher_Type @@ -361,10 +361,9 @@ func TranslatePromMatchers(ms ...*labels.Matcher) ([]LabelMatcher, error) { return res, nil } -// TranslateFromPromMatchers returns Prometheus matchers from proto matchers. +// MatchersToPromMatchers returns Prometheus matchers from proto matchers. // NOTE: It allocates memory. -// TODO(bwplotka): Create yolo/no-alloc helper. -func TranslateFromPromMatchers(ms ...LabelMatcher) ([]*labels.Matcher, error) { +func MatchersToPromMatchers(ms ...LabelMatcher) ([]*labels.Matcher, error) { res := make([]*labels.Matcher, 0, len(ms)) for _, m := range ms { var t labels.MatchType diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go new file mode 100644 index 00000000000..aeb4a25aef2 --- /dev/null +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/inprocess.go @@ -0,0 +1,97 @@ +// Copyright (c) The Thanos Authors. +// Licensed under the Apache License 2.0. + +package storepb + +import ( + "context" + "io" + + "google.golang.org/grpc" +) + +func ServerAsClient(srv StoreServer, clientReceiveBufferSize int) StoreClient { + return &serverAsClient{srv: srv, clientReceiveBufferSize: clientReceiveBufferSize} +} + +// serverAsClient allows using servers as clients. +// NOTE: Passing CallOptions does not work - it would need to be implemented in grpc itself (before, after are private).
+type serverAsClient struct { + clientReceiveBufferSize int + srv StoreServer +} + +func (s serverAsClient) Info(ctx context.Context, in *InfoRequest, _ ...grpc.CallOption) (*InfoResponse, error) { + return s.srv.Info(ctx, in) +} + +func (s serverAsClient) LabelNames(ctx context.Context, in *LabelNamesRequest, _ ...grpc.CallOption) (*LabelNamesResponse, error) { + return s.srv.LabelNames(ctx, in) +} + +func (s serverAsClient) LabelValues(ctx context.Context, in *LabelValuesRequest, _ ...grpc.CallOption) (*LabelValuesResponse, error) { + return s.srv.LabelValues(ctx, in) +} + +func (s serverAsClient) Series(ctx context.Context, in *SeriesRequest, _ ...grpc.CallOption) (Store_SeriesClient, error) { + inSrv := &inProcessStream{recv: make(chan *SeriesResponse, s.clientReceiveBufferSize), err: make(chan error)} + inSrv.ctx, inSrv.cancel = context.WithCancel(ctx) + go func() { + inSrv.err <- s.srv.Series(in, inSrv) + close(inSrv.err) + close(inSrv.recv) + }() + return &inProcessClientStream{srv: inSrv}, nil +} + +// TODO(bwplotka): Add streaming attributes, metadata etc. Currently those are disconnected. Follow up on https://github.com/grpc/grpc-go/issues/906. +// TODO(bwplotka): Use this in proxy.go and receiver multi tenant proxy. +type inProcessStream struct { + grpc.ServerStream + + ctx context.Context + cancel context.CancelFunc + recv chan *SeriesResponse + err chan error +} + +func (s *inProcessStream) Context() context.Context { return s.ctx } + +func (s *inProcessStream) Send(r *SeriesResponse) error { + select { + case <-s.ctx.Done(): + return s.ctx.Err() + case s.recv <- r: + return nil + } +} + +type inProcessClientStream struct { + grpc.ClientStream + + srv *inProcessStream +} + +func (s *inProcessClientStream) Context() context.Context { return s.srv.ctx } + +func (s *inProcessClientStream) CloseSend() error { + s.srv.cancel() + return nil +} + +func (s *inProcessClientStream) Recv() (*SeriesResponse, error) { + select { + case <-s.srv.ctx.Done(): + return nil, s.srv.ctx.Err() + case r, ok := <-s.srv.recv: + if !ok { + return nil, io.EOF + } + return r, nil + case err := <-s.srv.err: + if err == nil { + return nil, io.EOF + } + return nil, err + } +} diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go index f9b8bbc1314..93ab7ece8c2 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.pb.go @@ -138,6 +138,7 @@ func (m *Sample) GetTimestamp() int64 { // TimeSeries represents samples and labels for a single time series. type TimeSeries struct { + // Labels have to be sorted by label names and without duplicated label names. // TODO(bwplotka): Don't use zero copy ZLabels, see https://github.com/thanos-io/thanos/pull/3279 for details. 
Labels []github_com_thanos_io_thanos_pkg_store_labelpb.ZLabel `protobuf:"bytes,1,rep,name=labels,proto3,customtype=github.com/thanos-io/thanos/pkg/store/labelpb.ZLabel" json:"labels"` Samples []Sample `protobuf:"bytes,2,rep,name=samples,proto3" json:"samples"` diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto index 2b7ac257754..64b8f0d9ed2 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto +++ b/vendor/github.com/thanos-io/thanos/pkg/store/storepb/prompb/types.proto @@ -35,6 +35,7 @@ message Sample { // TimeSeries represents samples and labels for a single time series. message TimeSeries { + // Labels have to be sorted by label names and without duplicated label names. // TODO(bwplotka): Don't use zero copy ZLabels, see https://github.com/thanos-io/thanos/pull/3279 for details. repeated thanos.Label labels = 1 [(gogoproto.nullable) = false, (gogoproto.customtype) = "github.com/thanos-io/thanos/pkg/store/labelpb.ZLabel"]; repeated Sample samples = 2 [(gogoproto.nullable) = false]; diff --git a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go index c9f4421e713..ca25072fb07 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go +++ b/vendor/github.com/thanos-io/thanos/pkg/store/tsdb.go @@ -10,7 +10,6 @@ import ( "github.com/go-kit/kit/log" "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/prometheus/pkg/labels" "github.com/prometheus/prometheus/storage" "github.com/thanos-io/thanos/pkg/store/labelpb" @@ -37,7 +36,7 @@ type TSDBStore struct { logger log.Logger db TSDBReader component component.StoreAPI - externalLabels labels.Labels + extLset labels.Labels maxBytesPerFrame int } @@ -54,7 +53,8 @@ type ReadWriteTSDBStore struct { } // NewTSDBStore creates a new TSDBStore. -func NewTSDBStore(logger log.Logger, _ prometheus.Registerer, db TSDBReader, component component.StoreAPI, externalLabels labels.Labels) *TSDBStore { +// NOTE: Given lset has to be sorted. +func NewTSDBStore(logger log.Logger, db TSDBReader, component component.StoreAPI, extLset labels.Labels) *TSDBStore { if logger == nil { logger = log.NewNopLogger() } @@ -62,7 +62,7 @@ func NewTSDBStore(logger log.Logger, _ prometheus.Registerer, db TSDBReader, com logger: logger, db: db, component: component, - externalLabels: externalLabels, + extLset: extLset, maxBytesPerFrame: RemoteReadFrameLimit, } } @@ -75,7 +75,7 @@ func (s *TSDBStore) Info(_ context.Context, _ *storepb.InfoRequest) (*storepb.In } res := &storepb.InfoResponse{ - Labels: labelpb.ZLabelsFromPromLabels(s.externalLabels), + Labels: labelpb.ZLabelsFromPromLabels(s.extLset), StoreType: s.component.ToProto(), MinTime: minTime, MaxTime: math.MaxInt64, @@ -101,7 +101,7 @@ type CloseDelegator interface { // Series returns all series for a requested time range and label matcher. The returned data may // exceed the requested time bounds. 
func (s *TSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesServer) error { - match, newMatchers, err := matchesExternalLabels(r.Matchers, s.externalLabels) + match, matchers, err := matchesExternalLabels(r.Matchers, s.extLset) if err != nil { return status.Error(codes.InvalidArgument, err.Error()) } @@ -110,15 +110,10 @@ func (s *TSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSer return nil } - if len(newMatchers) == 0 { + if len(matchers) == 0 { return status.Error(codes.InvalidArgument, errors.New("no matchers specified (excluding external labels)").Error()) } - matchers, err := storepb.TranslateFromPromMatchers(newMatchers...) - if err != nil { - return status.Error(codes.InvalidArgument, err.Error()) - } - q, err := s.db.ChunkQuerier(context.Background(), r.MinTime, r.MaxTime) if err != nil { return status.Error(codes.Internal, err.Error()) @@ -135,16 +130,16 @@ func (s *TSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSer // Stream at most one series per frame; series may be split over multiple frames according to maxBytesInFrame. for set.Next() { series := set.At() - seriesLabels := storepb.Series{Labels: labelpb.ZLabelsFromPromLabels(labelpb.ExtendLabels(series.Labels(), s.externalLabels))} + storeSeries := storepb.Series{Labels: labelpb.ZLabelsFromPromLabels(labelpb.ExtendSortedLabels(series.Labels(), s.extLset))} if r.SkipChunks { - if err := srv.Send(storepb.NewSeriesResponse(&seriesLabels)); err != nil { + if err := srv.Send(storepb.NewSeriesResponse(&storeSeries)); err != nil { return status.Error(codes.Aborted, err.Error()) } continue } bytesLeftForChunks := s.maxBytesPerFrame - for _, lbl := range seriesLabels.Labels { + for _, lbl := range storeSeries.Labels { bytesLeftForChunks -= lbl.Size() } frameBytesLeft := bytesLeftForChunks @@ -174,7 +169,7 @@ func (s *TSDBStore) Series(r *storepb.SeriesRequest, srv storepb.Store_SeriesSer if frameBytesLeft > 0 && isNext { continue } - if err := srv.Send(storepb.NewSeriesResponse(&storepb.Series{Labels: seriesLabels.Labels, Chunks: seriesChunks})); err != nil { + if err := srv.Send(storepb.NewSeriesResponse(&storepb.Series{Labels: storeSeries.Labels, Chunks: seriesChunks})); err != nil { return status.Error(codes.Aborted, err.Error()) } diff --git a/vendor/github.com/weaveworks/common/httpgrpc/README.md b/vendor/github.com/weaveworks/common/httpgrpc/README.md index 50ce9698085..4e4d7fe3db9 100644 --- a/vendor/github.com/weaveworks/common/httpgrpc/README.md +++ b/vendor/github.com/weaveworks/common/httpgrpc/README.md @@ -1,4 +1,4 @@ -**What?** Embedding HTTP requests and responses into a gRPC service; a service and client to translate back and forth between the two, so you can use them with your faviourite mux. +**What?** Embedding HTTP requests and responses into a gRPC service; a service and client to translate back and forth between the two, so you can use them with your preferred mux. **Why?** Get all the goodness of protobuf encoding, HTTP/2, snappy, load balancing, persistent connection and native Kubernetes load balancing with ~none of the effort. 
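As a rough usage sketch of the pattern the README above describes, an ordinary http.Handler can be wrapped and served over gRPC. The registration call httpgrpc.RegisterHTTPServer is assumed from the package's generated stubs, and the mux, route, and port here are illustrative only:

package main

import (
	"log"
	"net"
	"net/http"

	"github.com/weaveworks/common/httpgrpc"
	httpgrpc_server "github.com/weaveworks/common/httpgrpc/server"
	"google.golang.org/grpc"
)

func main() {
	// Any ordinary http.Handler works; the wrapper replays each embedded
	// HTTPRequest against it, as in the Handle method patched below.
	mux := http.NewServeMux()
	mux.HandleFunc("/ready", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	lis, err := net.Listen("tcp", ":9095")
	if err != nil {
		log.Fatal(err)
	}

	grpcServer := grpc.NewServer()
	httpgrpc.RegisterHTTPServer(grpcServer, httpgrpc_server.NewServer(mux))
	log.Fatal(grpcServer.Serve(lis))
}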
diff --git a/vendor/github.com/weaveworks/common/httpgrpc/server/server.go b/vendor/github.com/weaveworks/common/httpgrpc/server/server.go index c312191eb8b..d478e18a439 100644 --- a/vendor/github.com/weaveworks/common/httpgrpc/server/server.go +++ b/vendor/github.com/weaveworks/common/httpgrpc/server/server.go @@ -37,15 +37,25 @@ func NewServer(handler http.Handler) *Server { } } +type nopCloser struct { + *bytes.Buffer +} + +func (nopCloser) Close() error { return nil } + +// BytesBuffer returns the underlying `bytes.Buffer` used to build this io.ReadCloser. +func (n nopCloser) BytesBuffer() *bytes.Buffer { return n.Buffer } + // Handle implements HTTPServer. func (s Server) Handle(ctx context.Context, r *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) { - req, err := http.NewRequest(r.Method, r.Url, ioutil.NopCloser(bytes.NewReader(r.Body))) + req, err := http.NewRequest(r.Method, r.Url, nopCloser{Buffer: bytes.NewBuffer(r.Body)}) if err != nil { return nil, err } toHeader(r.Headers, req.Header) req = req.WithContext(ctx) req.RequestURI = r.Url + req.ContentLength = int64(len(r.Body)) recorder := httptest.NewRecorder() s.handler.ServeHTTP(recorder, req) diff --git a/vendor/github.com/weaveworks/common/tracing/tracing.go b/vendor/github.com/weaveworks/common/tracing/tracing.go index ae38417e977..2d1c6901825 100644 --- a/vendor/github.com/weaveworks/common/tracing/tracing.go +++ b/vendor/github.com/weaveworks/common/tracing/tracing.go @@ -10,7 +10,7 @@ import ( // ErrBlankTraceConfiguration is an error to notify the client to provide a valid trace report agent or config server var ( - ErrBlankTraceConfiguration = errors.New("no trace report agent or config server specified") + ErrBlankTraceConfiguration = errors.New("no trace report agent, config server, or collector endpoint specified") ) // installJaeger registers Jaeger as the OpenTracing implementation. @@ -35,7 +35,7 @@ func NewFromEnv(serviceName string) (io.Closer, error) { return nil, errors.Wrap(err, "could not load jaeger tracer configuration") } - if cfg.Sampler.SamplingServerURL == "" && cfg.Reporter.LocalAgentHostPort == "" { + if cfg.Sampler.SamplingServerURL == "" && cfg.Reporter.LocalAgentHostPort == "" && cfg.Reporter.CollectorEndpoint == "" { return nil, ErrBlankTraceConfiguration } diff --git a/vendor/go.opencensus.io/go.mod b/vendor/go.opencensus.io/go.mod index c867df5f5c4..29707156269 100644 --- a/vendor/go.opencensus.io/go.mod +++ b/vendor/go.opencensus.io/go.mod @@ -7,7 +7,7 @@ require ( github.com/stretchr/testify v1.4.0 golang.org/x/net v0.0.0-20190620200207-3b0461eec859 golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect - golang.org/x/text v0.3.2 // indirect + golang.org/x/text v0.3.3 // indirect google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect google.golang.org/grpc v1.20.1 ) diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go index abe978b67b8..49fde3d8c82 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/client_metrics.go @@ -60,6 +60,8 @@ var ( Aggregation: DefaultMillisecondsDistribution, } + // Purposely reuses the count from `ClientRoundtripLatency`, tagging + // with method and status to result in ClientCompletedRpcs.
ClientCompletedRPCsView = &view.View{ Measure: ClientRoundtripLatency, Name: "grpc.io/client/completed_rpcs", diff --git a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go index 609d9ed248b..b2059824a85 100644 --- a/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go +++ b/vendor/go.opencensus.io/plugin/ocgrpc/server_metrics.go @@ -63,6 +63,8 @@ var ( Aggregation: DefaultMillisecondsDistribution, } + // Purposely reuses the count from `ServerLatency`, tagging + // with method and status to result in ServerCompletedRpcs. ServerCompletedRPCsView = &view.View{ Name: "grpc.io/server/completed_rpcs", Description: "Count of RPCs by method and status.", diff --git a/vendor/go.opencensus.io/stats/view/aggregation.go b/vendor/go.opencensus.io/stats/view/aggregation.go index 9d7093728ed..748bd568cda 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation.go +++ b/vendor/go.opencensus.io/stats/view/aggregation.go @@ -15,6 +15,8 @@ package view +import "time" + // AggType represents the type of aggregation function used on a View. type AggType int @@ -45,20 +47,20 @@ type Aggregation struct { Type AggType // Type is the AggType of this Aggregation. Buckets []float64 // Buckets are the bucket endpoints if this Aggregation represents a distribution, see Distribution. - newData func() AggregationData + newData func(time.Time) AggregationData } var ( aggCount = &Aggregation{ Type: AggTypeCount, - newData: func() AggregationData { - return &CountData{} + newData: func(t time.Time) AggregationData { + return &CountData{Start: t} }, } aggSum = &Aggregation{ Type: AggTypeSum, - newData: func() AggregationData { - return &SumData{} + newData: func(t time.Time) AggregationData { + return &SumData{Start: t} }, } ) @@ -103,8 +105,8 @@ func Distribution(bounds ...float64) *Aggregation { Type: AggTypeDistribution, Buckets: bounds, } - agg.newData = func() AggregationData { - return newDistributionData(agg) + agg.newData = func(t time.Time) AggregationData { + return newDistributionData(agg, t) } return agg } @@ -114,7 +116,7 @@ func Distribution(bounds ...float64) *Aggregation { func LastValue() *Aggregation { return &Aggregation{ Type: AggTypeLastValue, - newData: func() AggregationData { + newData: func(_ time.Time) AggregationData { return &LastValueData{} }, } diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go index f331d456e9b..d93b520662d 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -31,6 +31,7 @@ type AggregationData interface { clone() AggregationData equal(other AggregationData) bool toPoint(t metricdata.Type, time time.Time) metricdata.Point + StartTime() time.Time } const epsilon = 1e-9 @@ -40,6 +41,7 @@ const epsilon = 1e-9 // // Most users won't directly access count data. 
type CountData struct { + Start time.Time Value int64 } @@ -50,7 +52,7 @@ func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) } func (a *CountData) clone() AggregationData { - return &CountData{Value: a.Value} + return &CountData{Value: a.Value, Start: a.Start} } func (a *CountData) equal(other AggregationData) bool { @@ -59,7 +61,7 @@ func (a *CountData) equal(other AggregationData) bool { return false } - return a.Value == a2.Value + return a.Start.Equal(a2.Start) && a.Value == a2.Value } func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { @@ -71,11 +73,17 @@ func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata. } } +// StartTime returns the start time of the data being aggregated by CountData. +func (a *CountData) StartTime() time.Time { + return a.Start +} + // SumData is the aggregated data for the Sum aggregation. // A sum aggregation processes data and sums up the recordings. // // Most users won't directly access sum data. type SumData struct { + Start time.Time Value float64 } @@ -86,7 +94,7 @@ func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { } func (a *SumData) clone() AggregationData { - return &SumData{Value: a.Value} + return &SumData{Value: a.Value, Start: a.Start} } func (a *SumData) equal(other AggregationData) bool { @@ -94,7 +102,7 @@ func (a *SumData) equal(other AggregationData) bool { if !ok { return false } - return math.Pow(a.Value-a2.Value, 2) < epsilon + return a.Start.Equal(a2.Start) && math.Pow(a.Value-a2.Value, 2) < epsilon } func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { @@ -108,6 +116,11 @@ func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Po } } +// StartTime returns the start time of the data being aggregated by SumData. +func (a *SumData) StartTime() time.Time { + return a.Start +} + // DistributionData is the aggregated data for the // Distribution aggregation. // @@ -126,9 +139,10 @@ type DistributionData struct { // an exemplar for the associated bucket, or nil. ExemplarsPerBucket []*metricdata.Exemplar bounds []float64 // histogram distribution of the values + Start time.Time } -func newDistributionData(agg *Aggregation) *DistributionData { +func newDistributionData(agg *Aggregation, t time.Time) *DistributionData { bucketCount := len(agg.Buckets) + 1 return &DistributionData{ CountPerBucket: make([]int64, bucketCount), @@ -136,6 +150,7 @@ func newDistributionData(agg *Aggregation) *DistributionData { bounds: agg.Buckets, Min: math.MaxFloat64, Max: math.SmallestNonzeroFloat64, + Start: t, } } @@ -226,7 +241,11 @@ func (a *DistributionData) equal(other AggregationData) bool { return false } } - return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon + return a.Start.Equal(a2.Start) && + a.Count == a2.Count && + a.Min == a2.Min && + a.Max == a2.Max && + math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon } func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { @@ -256,6 +275,11 @@ func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metr } } +// StartTime returns the start time of the data being aggregated by DistributionData. 
+func (a *DistributionData) StartTime() time.Time { + return a.Start +} + // LastValueData returns the last value recorded for LastValue aggregation. type LastValueData struct { Value float64 @@ -291,3 +315,22 @@ func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricd panic("unsupported metricdata.Type") } } + +// StartTime returns an empty time value as start time is not recorded when using last value +// aggregation. +func (l *LastValueData) StartTime() time.Time { + return time.Time{} +} + +// ClearStart clears the Start field from data if present. Useful for testing in cases where the +// start time will be nondeterministic. +func ClearStart(data AggregationData) { + switch data := data.(type) { + case *CountData: + data.Start = time.Time{} + case *SumData: + data.Start = time.Time{} + case *DistributionData: + data.Start = time.Time{} + } +} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go index 8a6a2c0fdc9..ac22c93a2b5 100644 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -35,7 +35,7 @@ type collector struct { func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { aggregator, ok := c.signatures[s] if !ok { - aggregator = c.a.newData() + aggregator = c.a.newData(t) c.signatures[s] = aggregator } aggregator.addSample(v, attachments, t) diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go index 5e1656a1f2b..57d615ec7e1 100644 --- a/vendor/go.opencensus.io/stats/view/view_to_metric.go +++ b/vendor/go.opencensus.io/stats/view/view_to_metric.go @@ -119,20 +119,15 @@ func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.La return labelValues } -func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries { +func rowToTimeseries(v *viewInternal, row *Row, now time.Time) *metricdata.TimeSeries { return &metricdata.TimeSeries{ Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), - StartTime: startTime, + StartTime: row.Data.StartTime(), } } -func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time, startTime time.Time) *metricdata.Metric { - if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || - v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { - startTime = time.Time{} - } - +func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time) *metricdata.Metric { rows := v.collectedRows() if len(rows) == 0 { return nil @@ -140,7 +135,7 @@ func viewToMetric(v *viewInternal, r *resource.Resource, now time.Time, startTim ts := []*metricdata.TimeSeries{} for _, row := range rows { - ts = append(ts, rowToTimeseries(v, row, now, startTime)) + ts = append(ts, rowToTimeseries(v, row, now)) } m := &metricdata.Metric{ diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index ab8bfd46d0b..6e8d18b7f6d 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -41,9 +41,9 @@ type measureRef struct { } type worker struct { - measures map[string]*measureRef - views map[string]*viewInternal - startTimes map[*viewInternal]time.Time + measures map[string]*measureRef + views map[string]*viewInternal + viewStartTimes map[*viewInternal]time.Time timer *time.Ticker 
c chan command @@ -244,13 +244,13 @@ func (w *worker) SetReportingPeriod(d time.Duration) { // a single process. func NewMeter() Meter { return &worker{ - measures: make(map[string]*measureRef), - views: make(map[string]*viewInternal), - startTimes: make(map[*viewInternal]time.Time), - timer: time.NewTicker(defaultReportingDuration), - c: make(chan command, 1024), - quit: make(chan bool), - done: make(chan bool), + measures: make(map[string]*measureRef), + views: make(map[string]*viewInternal), + viewStartTimes: make(map[*viewInternal]time.Time), + timer: time.NewTicker(defaultReportingDuration), + c: make(chan command, 1024), + quit: make(chan bool), + done: make(chan bool), exporters: make(map[Exporter]struct{}), } @@ -324,7 +324,7 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { return x, nil } w.views[vi.view.Name] = vi - w.startTimes[vi] = time.Now() + w.viewStartTimes[vi] = time.Now() ref := w.getMeasureRef(vi.view.Measure.Name()) ref.views[vi] = struct{}{} return vi, nil @@ -334,7 +334,7 @@ func (w *worker) unregisterView(v *viewInternal) { w.mu.Lock() defer w.mu.Unlock() delete(w.views, v.view.Name) - delete(w.startTimes, v) + delete(w.viewStartTimes, v) if measure := w.measures[v.view.Measure.Name()]; measure != nil { delete(measure.views, v) } @@ -347,7 +347,7 @@ func (w *worker) reportView(v *viewInternal) { rows := v.collectedRows() viewData := &Data{ View: v.view, - Start: w.startTimes[v], + Start: w.viewStartTimes[v], End: time.Now(), Rows: rows, } @@ -371,15 +371,7 @@ func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { return nil } - var startTime time.Time - if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || - v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { - startTime = time.Time{} - } else { - startTime = w.startTimes[v] - } - - return viewToMetric(v, w.r, now, startTime) + return viewToMetric(v, w.r, now) } // Read reads all view data and returns them as metrics. diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go index 125e2cd9012..daf895596a9 100644 --- a/vendor/go.opencensus.io/trace/trace.go +++ b/vendor/go.opencensus.io/trace/trace.go @@ -206,6 +206,10 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa span.spanContext = parent cfg := config.Load().(*Config) + if gen, ok := cfg.IDGenerator.(*defaultIDGenerator); ok { + // lazy initialization + gen.init() + } if !hasParent { span.spanContext.TraceID = cfg.IDGenerator.NewTraceID() @@ -534,20 +538,9 @@ func (s *Span) String() string { var config atomic.Value // access atomically func init() { - gen := &defaultIDGenerator{} - // initialize traceID and spanID generators. - var rngSeed int64 - for _, p := range []interface{}{ - &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, - } { - binary.Read(crand.Reader, binary.LittleEndian, p) - } - gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) - gen.spanIDInc |= 1 - config.Store(&Config{ DefaultSampler: ProbabilitySampler(defaultSamplingProbability), - IDGenerator: gen, + IDGenerator: &defaultIDGenerator{}, MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, @@ -571,6 +564,24 @@ type defaultIDGenerator struct { traceIDAdd [2]uint64 traceIDRand *rand.Rand + + initOnce sync.Once +} + +// init initializes the generator on the first call to avoid consuming entropy +// unnecessarily. 
+func (gen *defaultIDGenerator) init() { + gen.initOnce.Do(func() { + // initialize traceID and spanID generators. + var rngSeed int64 + for _, p := range []interface{}{ + &rngSeed, &gen.traceIDAdd, &gen.nextSpanID, &gen.spanIDInc, + } { + binary.Read(crand.Reader, binary.LittleEndian, p) + } + gen.traceIDRand = rand.New(rand.NewSource(rngSeed)) + gen.spanIDInc |= 1 + }) } // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go index 2fc1ec03122..1108e114472 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64,!gccgo,!appengine +// +build amd64,gc,!purego package argon2 diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s index 74a6e7332a9..c4c84f07a01 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64,!gccgo,!appengine +// +build amd64,gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go index baf7b551daf..4a963c7808f 100644 --- a/vendor/golang.org/x/crypto/argon2/blamka_ref.go +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 appengine gccgo +// +build !amd64 purego !gc package argon2 diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go index 4d31dd0fdcd..8a893fdfff5 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.7,amd64,!gccgo,!appengine +// +build go1.7,amd64,gc,!purego package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s index 5593b1b3dce..8608a7f7d1c 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2bAVX2_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build go1.7,amd64,!gccgo,!appengine +// +build go1.7,amd64,gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go index 30e2fcd581f..a52c887fcb3 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. 
-// +build !go1.7,amd64,!gccgo,!appengine +// +build !go1.7,amd64,gc,!purego package blake2b diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s index 578e947b3bf..1f4c6a92791 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_amd64.s @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build amd64,!gccgo,!appengine +// +build amd64,gc,!purego #include "textflag.h" diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go index da156a1ba62..85974577811 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2b_ref.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build !amd64 appengine gccgo +// +build !amd64 purego !gc package blake2b diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 8b129b79419..7688d72c396 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -2632,7 +2632,9 @@ func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s body func (s bodyWriterState) cancel() { if s.timer != nil { - s.timer.Stop() + if s.timer.Stop() { + s.resc <- nil + } } } diff --git a/vendor/golang.org/x/net/internal/socket/zsys_openbsd_mips64.go b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_mips64.go new file mode 100644 index 00000000000..0112832400b --- /dev/null +++ b/vendor/golang.org/x/net/internal/socket/zsys_openbsd_mips64.go @@ -0,0 +1,50 @@ +// Code generated by cmd/cgo -godefs; DO NOT EDIT. +// cgo -godefs defs_openbsd.go + +package socket + +type iovec struct { + Base *byte + Len uint64 +} + +type msghdr struct { + Name *byte + Namelen uint32 + Iov *iovec + Iovlen uint32 + Control *byte + Controllen uint32 + Flags int32 +} + +type cmsghdr struct { + Len uint32 + Level int32 + Type int32 +} + +type sockaddrInet struct { + Len uint8 + Family uint8 + Port uint16 + Addr [4]byte /* in_addr */ + Zero [8]int8 +} + +type sockaddrInet6 struct { + Len uint8 + Family uint8 + Port uint16 + Flowinfo uint32 + Addr [16]byte /* in6_addr */ + Scope_id uint32 +} + +const ( + sizeofIovec = 0x10 + sizeofMsghdr = 0x30 + + sizeofSockaddrInet = 0x10 + sizeofSockaddrInet6 = 0x1c +) diff --git a/vendor/golang.org/x/net/publicsuffix/table.go b/vendor/golang.org/x/net/publicsuffix/table.go index ec2bde8cb21..31a034c5deb 100644 --- a/vendor/golang.org/x/net/publicsuffix/table.go +++ b/vendor/golang.org/x/net/publicsuffix/table.go @@ -2,7 +2,7 @@ package publicsuffix -const version = "publicsuffix.org's public_suffix_list.dat, git revision bdbe9dfd268d040fc826766b1d4e27dc4416fe73 (2020-08-10T09:26:55Z)" +const version = "publicsuffix.org's public_suffix_list.dat, git revision f9f612a3386dd9a1e4a1892722e3418549520b49 (2020-11-30T21:55:23Z)" const ( nodesBitsChildren = 10 @@ -23,492 +23,499 @@ const ( ) // numTLD is the number of top level domains. -const numTLD = 1518 +const numTLD = 1513 // Text is the combined text of all labels. 
const text = "9guacuiababia-goracleaningroks-theatree12hpalermomahachijolstere" + - "trosnubalsfjorddnslivelanddnss3-ap-south-1kappchizip6116-b-datai" + - "ji234lima-cityeatselinogradult3l3p0rtatamotors3-ap-northeast-133" + - "7birkenesoddtangenovaranzaninohekinannestadivttasvuotnakamuratak" + - "ahamalselvendrellimitediyukuhashimojindianapolis-a-bloggerbirthp" + - "lacebjarkoyurihonjournalistjohninomiyakonojorpelandnpanamatta-va" + - "rjjatjeldsundrangedalimoliseminebjerkreimdbamblebesbyglandroverh" + - "alla-speziaustevollaziobihirosakikamijimatsuzakibigawagrocerybni" + - "keisenbahnatuurwetenschappenaumburgdyniabogadobeaemcloud66bjugni" + - "eznord-frontierblackfridayusuharabloombergbauernirasakindianmark" + - "etingjesdalinkyard-cloudyclusterbloxcms3-website-us-west-2blueda" + - "gestangeologyusuisservehumourbmoattachments5yuulmemorialivornoce" + - "anographiquebmsakyotanabellunord-aurdalpha-myqnapcloudaccesscamb" + - "ridgeiseiyoichippubetsubetsugarugbydgoszczecinemagentositechnolo" + - "gyuzawabmweddingjovikariyameinforumzjampagexlombardynaliaskimits" + - "ubatamibugattiffanycateringebuildingladefinimakanegasakirabnrwed" + - "eploybomloabathsbcatholicaxiashorokanaiebondray-dnstracebonnishi" + - "azaindielddanuorrindigenaklodzkodairabookinghostedpictethnologyb" + - "oomlair-traffic-controlleyboschaefflerdalomzaporizhzhegurindustr" + - "iabostikarlsoybostonakijinsekikogentappsselfipanasonichernihivgu" + - "bsalangenishigocelotenkawabotanicalgardenishiharabotanicgardenis" + - "hiizunazukindustriesteamsterdamnserverbaniabotanynysagaeroclubme" + - "decincinnationwidealerbouncemerckmsdnipropetrovskjervoyagets-itj" + - "maxxxboxenapponazure-mobilebounty-fullensakerrypropertiesalondon" + - "etskarmoyboutiquebechernivtsiciliabozen-sudtirolondrinamsskogane" + - "infinitintelligencebozen-suedtirolorenskoglassassinationalherita" + - "gebplacedogawarabikomaezakirunorddalottebrandywinevalleybrasilia" + - "brindisibenikinderoybristoloseyouriparachutingleezebritishcolumb" + - "ialowiezaganishikatakinouebroadcastlebtimnetzlglitchattanooganor" + - "dlandrayddnsfreebox-osascoli-picenordre-landraydnsupdaternopilaw" + - "atchesaltdalottokonamegatakazakinternationalfirearmsaludrivefsni" + - "llfjordrobaknoluoktachikawakuyabukievennodesadoes-itvedestrandru" + - "dupontariobranconakaniikawatanagurabroadwaybroke-itjomeloyalisto" + - "ragebrokerbronnoysundurbanamexhibitionishikatsuragit-reposalvado" + - "rdalibabalena-devicesalzburgliwicebrothermesaverdealstahaugesund" + - "erseaportsinfolldalouvreisenishikawazukamisunagawabrowsersafetym" + - "arketsamegawabrumunddalowiczest-le-patronishimerabrunelastxfinit" + - "ybrusselsamnangerbruxellesampalacebryansklepparaglidinglobalasho" + - "vhachinohedmarkarpaczeladzparisor-fronishinomiyashironocparliame" + - "ntjxjavald-aostarnbergloboavistanbulsan-sudtirolpusercontentkmax" + - "xn--0trq7p7nnishinoomotegoddabrynewhollandurhamburglogowegroweib" + - "olognagareyamakeupowiathletajimabaridagawalbrzycharitydalaskanit" + - "tedallasalleangaviikaascolipicenodumemsettsupportksatxn--11b4c3d" + - "ynathomebuiltwithdarkaruizawabuskerudinewjerseybuzentsujiiebuzzw" + - "eirbwellbeingzonebzhitomirumalatvuopmicrolightingloppenzaolbia-t" + - "empio-olbiatempioolbialystokkepnogatagajobojintuitmparmattelekom" + - "munikationishinoshimatsuurabzzcolumbusheycommunexus-2community-p" + - "rochowicecomoarekecomparemarkerryhotelsaobernardocompute-1comput" + - "erhistoryofscience-fictioncomsecuritytacticsxn--12cfi8ixb8luxury" + - 
"condoshichinohealth-carereformitakeharaconferenceconstructioncon" + - "suladonnagatorodoyconsultanthropologyconsultingrondarcontactozsd" + - "eltajirittogliattis-a-chefashioncontagematsubaracontemporaryarte" + - "ducationalchikugodontexistmein-iservebeercontractorskenconventur" + - "eshinodearthruherecipescaravantaacookingchannelsdvrdnsdojoburgro" + - "ngausdaluzerncoolvivanovoldacooperativano-frankivskolefrakkestad" + - "yndns1copenhagencyclopedichitosetogakushimotoganewspapercoproduc" + - "tionsaogoncartoonartdecologiacorporationcorsicagliaricoharuovatm" + - "allorcadaquesaotomeldalcorvettemasekashiwazakiyosemitecosenzakop" + - "anelblagrarchaeologyeongbuk0cosidnsfor-better-thanawassamukawata" + - "rikuzentakatajimidorissagamiharacostumedicinaharimalopolskanland" + - "ynnsapporocouchpotatofriesardegnaroycouklugsmilegallocus-3counci" + - "lcouponsardiniacozoracq-acranbrookuwanalyticsarlcrdynservebbsarp" + - "sborgrossetouchihayaakasakawaharacreditcardynulvikasserversaille" + - "sarufutsunomiyawakasaikaitakofuefukihaboromskogroundhandlingrozn" + - "ycreditunioncremonashgabadaddjaguarqcxn--12co0c3b4evalleaostavan" + - "gercrewiencricketrzyncrimeast-kazakhstanangercrotonecrownipartsa" + - "sayamacrsvpartycruisesasebofageometre-experts-comptablesaskatche" + - "wancryptonomichigangwoncuisinellajollamericanexpressexyculturalc" + - "entertainmentrani-andria-barletta-trani-andriacuneocupcakecuriti" + - "backyardsassaris-a-conservativegarsheis-a-cpadualstackhero-netwo" + - "rkinggroupasadenarashinocurvalled-aostaverncymrussiacyonabarumet" + - "lifeinsurancecyouthachiojiyaitakanezawafetsundyroyrvikingrpassag" + - "ensaudafguidegreefhvalerfidoomdnsiskinkyotobetsulikes-piedmontic" + - "ellodingenfieldfigueresinstaginguitarsavonarusawafilateliafilege" + - "ar-audnedalnfilegear-deatnunusualpersonfilegear-gbizfilegear-ief" + - "ilegear-jpmorganfilegear-sgujoinvilleitungsenfilminamiechizenfin" + - "alfinancefineartsaxofinlandfinnoyfirebaseappassenger-association" + - "firenetranoyfirenzefirestonefirmdalegoldpoint2thisamitsukefishin" + - "golffanschoenbrunnfitjarvodkafjordvalledaostargetmyiphostre-tote" + - "ndofinternet-dnschokokekschokoladenfitnessettlementransportefjal" + - "erflesbergulenflickragerogerscholarshipschoolschulezajskasuyanai" + - "zunzenflightschulserverflirfloginlinefloraflorencefloridatsunanj" + - "oetsuwanouchikujogaszkolancashirecreationfloripaderbornfloristan" + - "ohatakaharuslivinghistoryflorokunohealthcareerschwarzgwangjunipe" + - "rflowerschweizfltransurlflynnhosting-clusterfndfor-ourfor-somedi" + - "zinhistorischesciencecentersciencehistoryfor-theaterforexrothach" + - "irogatakaokalmykiaforgotdnscientistordalforli-cesena-forlicesena" + - "forlillehammerfeste-ipatriaforsaleikangerforsandasuologoipavianc" + - "arrdfortalfortmissoulancasterfortworthadanorthwesternmutualfosne" + - "scjohnsonfotaruis-a-democratrapaniizafoxfordebianfozfredrikstadt" + - "vscrapper-sitefreeddnsgeekgalaxyfreedesktopensocialfreemasonryfr" + - "eesitexaskoyabearalvahkikuchikuseikarugalsaceofreetlscrappingunm" + - "anxn--1ctwolominamatarnobrzegyptianfreiburguovdageaidnusrcfastly" + - "lbananarepublicaseihicampobassociatest-iservecounterstrikehimeji" + - "itatebayashijonawatempresashibetsukuiiyamanouchikuhokuryugasakit" + - "auraustinnaval-d-aosta-valleyokosukanumazuryokoteastcoastaldefen" + - "ceatonsbergivingjemnes3-eu-central-1freseniuscountryestateofdela" + - "wareggio-calabriafribourgushikamifuranorth-kazakhstanfriuli-v-gi" + - "uliafriuli-ve-giuliafriuli-vegiuliafriuli-venezia-giuliafriuli-v" + - 
"eneziagiuliafriuli-vgiuliafriuliv-giuliafriulive-giuliafriuliveg" + - "iuliafriulivenezia-giuliafriuliveneziagiuliafriulivgiuliafrlfrog" + - "anscrysechocolatelemarkarumaifarsundyndns-homednsamsungmodelling" + - "mxn--12c1fe0bradescotlandyndns-iparochernigovernmentoyotaparsand" + - "nessjoenishiokoppegardyndns-mailubindalublindesnesandoyfrognfrol" + - "andfrom-akrehamnfrom-alfrom-arfrom-azfrom-capetownnews-stagingwi" + - "ddleksvikaszubyfrom-coffeedbackplaneapplinzis-a-designerfrom-ctr" + - "avelchannelfrom-dchofunatoriginstitutelevisionthewifiatoyotomiya" + - "zakinuyamashinatsukigatakashimarnardalucaniafrom-dedyn-berlincol" + - "nfrom-flanderserveirchonanbulsan-suedtiroluccarbonia-iglesias-ca" + - "rboniaiglesiascarboniafrom-gaulardalfrom-hichisochildrensgardenf" + - "rom-iafrom-idfrom-ilfrom-in-brbar0emmafann-arboretumbriamallamac" + - "eiobbcg12038from-kserveminecraftravelersinsurancefrom-kyowariasa" + - "hikawawiiheyakumoduminamifuranofrom-lanciafrom-mamurogawafrom-md" + - "from-meeresistancefrom-mifunefrom-mnfrom-modalenfrom-mservemp3fr" + - "om-mtnfrom-nctulangevagrigentomologyeonggiehtavuoatnabudapest-a-" + - "la-masion-riopretobamaceratabuseating-organichoseiroumuenchenish" + - "itosashimizunaminamibosogndalucernefrom-ndfrom-nefrom-nh-servebl" + - "ogsiteleafamilycompanyanagawafflecellclaimservep2pfizerfrom-njaw" + - "orznoticiasnesoddenmarkhangelskjakdnepropetrovskiervaapsteiermar" + - "katowicefrom-nminamiiserniafrom-nvallee-aosteroyfrom-nyfrom-ohku" + - "rafrom-oketogurafrom-orfrom-padovaksdalfrom-pratohmandalfrom-ris" + - "-a-doctorayfrom-schmidtre-gauldalfrom-sdfrom-tnfrom-txn--1lqs03n" + - "from-utsiracusaikisarazurecontainerdpolicefrom-val-daostavalleyf" + - "rom-vtrdfrom-wafrom-wiardwebhostingxn--1lqs71dfrom-wvallee-d-aos" + - "teigenfrom-wyfrosinonefrostalowa-wolawafroyahooguyfstcgroupgfogg" + - "iafujiiderafujikawaguchikonefujiminokamoenairguardiannakadomarin" + - "ebraskauniversitychyattorneyagawakembuchikumagayagawakkanaibetsu" + - "bamericanfamilydsclouderackmazerbaijan-mayen-rootaribeiraogashim" + - "adachicagoboatservepicservequakefujinomiyadattowebcampinashikimi" + - "nohostfoldnavyfujiokayamangonohejis-a-financialadvisor-aurdalfuj" + - "isatoshonairlinedre-eikerfujisawafujishiroishidakabiratoridefens" + - "eljordfujitsurugashimangyshlakasamatsudopaasiafujixeroxn--1qqw23" + - "afujiyoshidavvenjargap-northeast-3fukayabeatservesarcasmatartand" + - "designfukuchiyamadavvesiidappnodebalancertificationfukudomigawaf" + - "ukuis-a-geekatsushikabeeldengeluidfukumitsubishigakishiwadazaifu" + - "daigojomedio-campidano-mediocampidanomediofukuokazakisofukushima" + - "niwakuratextileirfjordfukuroishikarikaturindalfukusakisosakitaga" + - "wafukuyamagatakahatakaishimoichinosekigaharafunabashiriuchinadaf" + - "unagatakamatsukawafunahashikamiamakusatsumasendaisennangooglecod" + - "espotrentin-sud-tirolfundaciofunkfeuerfuoiskujukuriyamannore-og-" + - "uvdalfuosskoczowildlifedorainfracloudfrontdoorfurnitureggio-emil" + - "ia-romagnakasatsunairportland-4-salernoboribetsuckservicesevasto" + - "polefurubirafurudonostiaafurukawairtelebitbridgestonekobayashiks" + - "hacknetcimbar1fusodegaurafussaintlouis-a-anarchistoireggiocalabr" + - "iafutabayamaguchinomihachimanagementrentin-sudtirolfutboldlygoin" + - "gnowhere-for-morenakatombetsumitakagiizefuttsurugimperiafuturecm" + - "sevenassisicilyfuturehostingfuturemailingfvgfyresdalhangoutsyste" + - "mscloudhannanmokuizumodenakayamapartmentsharpharmacienshawaiijim" + - "aritimoldeloittemp-dnshellaspeziahannosegawahanyuzenhapmircloudh" + - 
"arstadharvestcelebrationhasamarburghasaminami-alpshimokawahashba" + - "nghasudahasura-appharmacyshimokitayamahasvikatsuyamarugame-hosty" + - "hostinghatogayaizuwakamatsubushikusakadogawahatoyamazakitakamiiz" + - "umisanofidelityhatsukaichikaiseiheijis-a-landscaperugiahattfjell" + - "dalhayashimamotobungotakadancehazuminobusells-for-utwentehelsink" + - "itakatakarazukaluganskygearapphdfcbankaufenhembygdsforbundhemnes" + - "himonitayanagithubusercontentrentin-suedtirolhemsedalhepforgeher" + - "okusslattuminamiizukaminoyamaxunjargaheroyhgtvalleeaosteinkjerus" + - "alembroideryhidorahigashiagatsumagoianiahigashichichibunkyonanao" + - "shimageandsoundandvisionrenderhigashihiroshimanehigashiizumozaki" + - "takyushuaiahigashikagawahigashikagurasoedahigashikawakitaaikitam" + - "ihamadahigashikurumeetrentino-a-adigehigashimatsushimarcheapigee" + - "lvinckautokeinotteroyhigashimatsuyamakitaakitadaitoigawahigashim" + - "urayamamotorcycleshimonosekikawahigashinarusells-itrentino-aadig" + - "ehigashinehigashiomitamamurausukitamotosumy-gatewayhigashiosakas" + - "ayamanakakogawahigashishirakawamatakasagopocznorfolkebibleirvika" + - "zoologyhigashisumiyoshikawaminamiaikitanakagusukumodernhigashits" + - "unoshiroomurahigashiurawa-mazowszexnetrentino-alto-adigehigashiy" + - "amatokoriyamanashiibahccavuotnagaraholtaleniwaizumiotsukumiyamaz" + - "onawsmpplanetariuminamimakis-a-lawyerhigashiyodogawahigashiyoshi" + - "nogaris-a-liberalhiraizumisatohnoshoooshikamaishimofusartshimosu" + - "walkis-a-libertarianhirakatashinagawahiranairtrafficplexus-1hira" + - "rahiratsukagawahirayakagehistorichouseshimotsukehitachiomiyagild" + - "eskaliszhitachiotagotembaixadahitraeumtgeradelmenhorstalbanshimo" + - "tsumahjartdalhjelmelandholeckochikushinonsenergyholidayhomegoods" + - "hinichinanhomeiphiladelphiaareadmyblogspotrentino-altoadigehomel" + - "inkitoolsztynsettlershinjournalismailillesandefjordhomelinuxn--2" + - "m4a15ehomeofficehomesecuritymacaparecidahomesecuritypchoshibuyac" + - "htsandvikcoromantovalle-d-aostatic-accessanfranciscofreakunemuro" + - "rangehirnrtoyotsukaidohtawaramotoineppueblockbustermezhomesensee" + - "ringhomeunixn--2scrj9choyodobashichikashukujitawarahondahongotpa" + - "ntheonsitehonjyoitakasakitashiobarahornindalhorsellsyourhomeftph" + - "ilatelyhorteneis-a-linux-useranishiaritabashikaoirminamiminowaho" + - "spitalhoteleshinjukumanowtvalleedaostehotmailhoyangerhoylandetro" + - "itskypehumanitieshinkamigotoyohashimototalhurdalhurumajis-a-llam" + - "arriottrentino-s-tirolhyllestadhyogoris-a-musicianhyugawarahyund" + - "aiwafuneis-very-evillageis-very-goodyearis-very-niceis-very-swee" + - "tpepperis-with-thebandownloadisleofmanaustdaljetztrentino-sudtir" + - "oljevnakershuscultureggioemiliaromagnamsosnowiechristiansburgret" + - "akanabeautysvardoesntexisteingeekasaokamikoaniikappuboliviajessh" + - "eimpertrixcdn77-ssldyndns-office-on-the-weberjewelryjewishartgal" + - "leryjfkfhappoujgorajlljls-sto1jmphotographysiojnjcloudjiffylkesb" + - "iblackbaudcdn77-securebungoonord-odaljoyentrentino-sued-tiroljoy" + - "okaichibajddarchitecturealtorlandjpnjprshirakokamiminershiranuka" + - "mitsuejurkosakaerodromegallupinbarclaycards3-sa-east-1koseis-a-p" + - "ainteractivegaskvollkosherbrookegawakoshimizumakizunokunimimatak" + - "ayamarylandkoshunantankharkivanylvenicekosugekotohiradomainsureg" + - "ruhostingkotourakouhokutamakis-a-patsfankounosupplieshiraois-a-p" + - "ersonaltrainerkouyamashikekouzushimashikis-a-photographerokuapph" + - "ilipsynology-diskstationkozagawakozakis-a-playershifteditchyouri" + - 
"phoenixn--30rr7ykozowinbarclays3-us-east-2kpnkppspdnshiraokamoga" + - "wakrasnikahokutokashikis-a-republicancerresearchaeologicaliforni" + - "akrasnodarkredstonekristiansandcatshiratakahagitlaborkristiansun" + - "dkrodsheradkrokstadelvaldaostarostwodzislawindmillkryminamioguni" + - "5kumatorinokumejimasoykumenantokigawakunisakis-a-rockstarachowic" + - "ekunitachiarailwaykunitomigusukumamotoyamashikokuchuokunneppubtl" + - "shishikuis-a-socialistdlibestadkunstsammlungkunstunddesignkuokgr" + - "oupilotshisognekurehabmerkurgankurobelaudibleasingleshisuifuette" + - "rtdasnetzkurogiminamiashigarakuroisoftwarezzokuromatsunais-a-sox" + - "fankurotakikawasakis-a-studentalkushirogawakustanais-a-teacherka" + - "ssyno-dshinshinotsurgerykusupplynxn--3bst00minamisanrikubetsurfa" + - "uskedsmokorsetagayaseralingenoamishirasatogokasells-for-lessauhe" + - "radynv6kutchanelkutnokuzumakis-a-techietis-a-nascarfankvafjordkv" + - "alsundkvamfamberkeleykvanangenkvinesdalkvinnheradkviteseidatingk" + - "vitsoykwpspectruminamitanekzmishimatsumaebashimodatemissileluxem" + - "bourgmisugitokuyamatsumotofukemitourismolanxesshitaramamitoyoake" + - "miuramiyazurewebsiteshikagamiishibukawamiyotamanomjondalenmlbfan" + - "montrealestatefarmequipmentrentinoa-adigemonza-brianzapposhizuku" + - "ishimogosenmonza-e-della-brianzaptokyotangotsukitahatakamoriokak" + - "egawamonzabrianzaramonzaebrianzamonzaedellabrianzamoonscaleforce" + - "mordoviamoriyamatsunomoriyoshiminamiawajikis-an-actormormonsterm" + - "oroyamatsusakahoginankokubunjis-an-actresshintokushimamortgagemo" + - "scowindowskrakowinnershizuokanagawamoseushistorymosjoenmoskenesh" + - "oppingmosshopwarendalenugmosvikhersonmoteginowaniihamatamakawaji" + - "mansionshoujis-an-anarchistoricalsocietymoviemovimientolgamozill" + - "a-iotrentinoaadigemtranbymuenstermuginozawaonsenmuikamiokameokam" + - "akurazakiwakunigamiharumukoebenhavnmulhouseoullensvanguardmunaka" + - "tanemuncienciamuosattemupimientakkoelnmurmanskhmelnitskiyamarumo" + - "rimachidamurotorcraftrentinoalto-adigemusashimurayamatsushigemus" + - "ashinoharamuseetrentinoaltoadigemuseumverenigingmusicargodaddyn-" + - "vpndnshowamutsuzawamy-vigorgemy-wanggouvichristmaseratiresangomu" + - "tashinainvestmentsanjotoyouramyactivedirectorymyasustor-elvdalmy" + - "cdmydattolocalhistorymyddnskingmydissentrentinos-tirolmydobisshi" + - "kis-an-artistgorymydroboehringerikemydshowtimelhusdecorativearts" + - "hriramlidlugolekadenagahamaroygardendoftheinternetlifyis-an-engi" + - "neeringmyeffectrentinostirolmyfastly-terrariuminamiuonumasudamyf" + - "irewallonieruchomoscienceandindustrynmyforuminamiyamashirokawana" + - "belembetsukubankharkovaomyfritzmyftpaccesshwiosienarutomobellevu" + - "elosangelesjabbottrentinosud-tirolmyhome-servermyjinomykolaivare" + - "servehalflifestylemymailermymediapchromedicaltanissettaishinomak" + - "inkobeardubaiduckdnsannanishiwakinzais-a-candidatemyokohamamatsu" + - "damypepinkhmelnytskyivaporcloudmypetsigdalmyphotoshibalatinogift" + - "silkhplaystation-cloudmypicturesimple-urlmypsxn--3ds443gmysecuri" + - "tycamerakermyshopblocksirdalmythic-beastsjcbnpparibaselburgmytis" + - "-a-bookkeeperspectakasugais-an-entertainermytuleaprendemasakikon" + - "aikawachinaganoharamcoachampionshiphoptobishimadridvagsoyermyvnc" + - "hungnamdalseidfjordyndns-picsannohelplfinancialukowhalingrimstad" + - "yndns-remotewdyndns-serverisignissandiegomywirepaircraftingvollo" + - "mbardiamondslupsklabudhabikinokawabarthadselectrentin-sued-tirol" + - "platformshangrilapyplatter-appioneerplatterpippugliaplazaplcube-" + - 
"serverplumbingoplurinacionalpodhalevangerpodlasiellaktyubinskipt" + - "veterinaireadthedocscappgafannefrankfurtrentinosudtirolpodzonepo" + - "hlpoivronpokerpokrovsknx-serversicherungpoliticarrierpolitiendap" + - "olkowicepoltavalle-aostathellewismillerpomorzeszowitdkomaganepon" + - "pesaro-urbino-pesarourbinopesaromasvuotnaritakurashikis-bytomari" + - "timekeepingponypordenonepornporsangerporsangugeporsgrunnanyokosh" + - "ibahikariwanumatamayufuelveruminanopoznanpraxis-a-bruinsfanprdpr" + - "eservationpresidioprgmrprimelbourneprincipeprivatizehealthinsura" + - "nceprofesionalprogressivenneslaskerrylogisticsnoasakakinokiaprom" + - "ombetsurgeonshalloffameiwamassa-carrara-massacarraramassabusines" + - "sebykleclerchurcharternidyndns-webhareidsbergentingripepropertyp" + - "rotectionprotonetrentinosued-tirolprudentialpruszkowithgoogleapi" + - "szprvcyberlevagangaviikanonjis-certifieducatorahimeshimamateramo" + - "baraprzeworskogptplusgardenpulawypupittsburghofficialpvhagakhana" + - "migawapvtrentinosuedtirolpwcircustomer-ociprianiigataitogitsulda" + - "luroypzqhagebostadqldqponiatowadaqslingqualifioappiwatequickconn" + - "ectrentinsud-tirolquicksytestingquipelementsokananiimihoboleslaw" + - "iecistrondheimmobilienissayokkaichiropractichernovtsyncloudyndns" + - "-at-homedepotenzamamidsundyndns-at-workisboringlugmbhartipscbgmi" + - "nakamichiharaqvcitadeliveryggeesusonosuzakanazawasuzukaneyamazoe" + - "suzukis-into-animegurownprovidersvalbardunloppacificitichirurgie" + - "ns-dentistes-en-francesvcivilaviationissedalutskashibatakatsukiy" + - "osatokamachintaifun-dnsaliasanokashiharasveiosvelvikommunalforbu" + - "ndsvizzerasvn-reposolutionsokndalswidnicasacamdvrcampinagrandebu" + - "ilderschlesischesomaswidnikkokonoeswiebodzin-butterswiftcoverswi" + - "noujscienceandhistoryswissmarterthanyousynology-dsomnarviikamisa" + - "tokaizukameyamatotakadatuscanytushuissier-justicetuvalle-daostat" + - "icsor-varangertuxfamilytwmailvestre-slidreportrevisohughesoovest" + - "re-totennishiawakuravestvagoyvevelstadvibo-valentiavibovalentiav" + - "ideovillasorocabalestrandabergamo-siemensncfdvinnicasadelamoneda" + - "pliernewportlligatritonvinnytsiavipsinaappixolinovirginiavirtual" + - "-userveftpizzavirtualservervirtualuservegame-servervirtueeldomei" + - "n-vigorlicevirtuelvisakegawaviterboknowsitallvivolkenkundenvixn-" + - "-3hcrj9civilisationisshinguccircleverappsantabarbaravlaanderenvl" + - "adikavkazimierz-dolnyvladimirvlogintoyonezawavminiservervologdan" + - "skomonowruzhgorodeovolvolkswagentsorreisahayakawakamiichikawamis" + - "atottoris-foundationvolyngdalvoorloperauniterois-into-carshintom" + - "ikasaharavossevangenvotevotingvotoyonowmcloudwmflabsortlandwnext" + - "directrogstadworldworse-thandawowithyoutuberspacekitagatargitpag" + - "efrontappkmpspbar2wpdevcloudwpenginepoweredwritesthisblogsytewro" + - "clawiwatsukiyonotairestaurantroandinosaurepbodynamic-dnsopotrent" + - "insudtirolwtcminnesotaketaketomisatokorozawawtfbsbxn--1ck2e1banz" + - "aicloudcontrolledekagaminombresciaustraliajudaicable-modemocraci" + - "abruzzoologicalvinklein-addrammenuorochesterimo-i-rana4u2-localh" + - "ostrowiec66wuozuwzmiuwajimaxn--45q11civilwarmiaxn--4gbriminingxn" + - "--4it168dxn--4it797kongsbergxn--4pvxs4allxn--54b7fta0cclanbibaid" + - "armeniaxn--55qw42gxn--55qx5dxn--5js045dxn--5rtp49cldmailovecolle" + - "gefantasyleaguernseyxn--5rtq34kongsvingerxn--5su34j936bgsgxn--5t" + - "zm5gxn--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264clic" + - "20001wwwhoswhokksundyndns-wikirkenesantacruzsantafedjejuifmetace" + - 
"ntrumeteorappartis-a-catererxn--80adxhksorumincomcastresindevice" + - "nzaporizhzhiaxn--80ao21axn--80aqecdr1axn--80asehdbarefootballoon" + - "ingjerdrumckinseyolasiteu-1xn--80aswgxn--80augustowloclawekomoro" + - "tsukaminokawanishiaizubangexn--8ltr62koninjambylxn--8pvr4uxn--8y" + - "0a063axn--90a3academiamicaaarborteaches-yogasawaracingxn--90aero" + - "portalabamagasakishimabaraogakibichuoxn--90aishobarakawagoexn--9" + - "0azhytomyravendbargainstantcloudfunctionswedenvironmentalconserv" + - "ationfabricafederationionjukudoyamaintenanceu-2xn--9dbhblg6digit" + - "alxn--9dbq2axn--9et52uxn--9krt00axn--andy-iraxn--aroport-byaotsu" + - "rreyxn--asky-iraxn--aurskog-hland-jnbarreauction-webhopenairbusa" + - "ntiquest-a-la-maisondre-landroidiscourses3-us-gov-west-1xn--aver" + - "y-yuasakuhokkaidovre-eikerxn--b-5gaxn--b4w605ferdxn--balsan-sdti" + - "rol-nsbsoundcastronomy-routerxn--bck1b9a5dre4clickashiwaraxn--bd" + - "ddj-mrabdxn--bearalvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--" + - "bhccavuotna-k7axn--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-f" + - "yasakaiminatoyookaniepcexn--bjddar-ptarumizusawaxn--blt-elabourx" + - "n--bmlo-graingerxn--bod-2natalxn--bozen-sdtirol-2obanazawaxn--br" + - "nny-wuacademy-firewall-gatewayxn--brnnysund-m8accident-investiga" + - "tion-aptibleadpagest-mon-blogueurovision-k3southcarolinarvikomat" + - "sushimarylhurstjordalshalsenxn--brum-voagatromsakataobaomoriguch" + - "iharahkkeravjuegoshikijobservableusercontentrentoyonakagyokutoya" + - "kolobrzegersundxn--btsfjord-9zaxn--bulsan-sdtirol-nsbarrel-of-kn" + - "owledgeapplicationcloudappspotagerevistaples3-us-west-1xn--c1avg" + - "xn--c2br7gxn--c3s14mintereitrentino-suedtirolxn--cck2b3barrell-o" + - "f-knowledgestack12xn--cckwcxetdxn--cesena-forl-mcbremangerxn--ce" + - "senaforl-i8axn--cg4bkis-into-cartoonshinyoshitomiokamitondabayas" + - "hiogamagoriziaxn--ciqpnxn--clchc0ea0b2g2a9gcdxn--comunicaes-v6a2" + - "oxn--correios-e-telecomunicaes-ghc29axn--czr694barsycenterprises" + - "akimobetsuitainaioirasebastopologyeongnamegawakayamagazineat-url" + - "illyombolzano-altoadigeorgeorgiaustrheimatunduhrennesoyokozebina" + - "gisoccertmgrazimutheworkpccwebredirectmembers3-eu-west-1xn--czrs" + - "0tromsojamisonxn--czru2dxn--czrw28barsyonlinewhampshirealtysnes3" + - "-us-west-2xn--d1acj3bashkiriauthordalandeportenrivnebinordreisa-" + - "hockeynutazuerichardlikescandyn53utilitiesquare7xn--d1alfaromeox" + - "n--d1atrusteexn--d5qv7z876clinichiryukyuragifuchungbukharavennag" + - "asakindlecznagasukexn--davvenjrga-y4axn--djrs72d6uyxn--djty4kons" + - "kowolayangroupiemontexn--dnna-grajewolterskluwerxn--drbak-wuaxn-" + - "-dyry-iraxn--e1a4cliniquenoharaxn--eckvdtc9dxn--efvn9southwestfa" + - "lenxn--efvy88haibarakitahiroshimaoris-a-greenxn--ehqz56nxn--elqq" + - "16hair-surveillancexn--eveni-0qa01gaxn--f6qx53axn--fct429konsula" + - "trobeepilepsykkylvenetodayxn--fhbeiarnxn--finny-yuaxn--fiq228c5h" + - "sowaxn--fiq64basicservercelliguriautomotiveconomiastagemological" + - "lyngenflfanquanpachigasakihokumakogenebakkeshibechambagriculture" + - "nnebudejjuedischesapeakebayernufcfanavigationavoizumizakibmdevel" + - "opmentatsunobiramusementdllpages3-ap-southeast-2ix4432-balsan-su" + - "edtirolkuszczytnoipirangamvik-serverrankoshigayachimataikikugawa" + - "lesundd-dnshome-webserverdal-o-g-i-n4tatarantours3-ap-northeast-" + - "2xn--fiqs8speedpartnersolarssonxn--fiqz9sphinxn--3e0b707exn--fjo" + - "rd-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-cesen" + - "a-fcbsspjelkavikomforbarcelonagawalmartattoolforgemreviewsaitosh" + - 
"imayfirstockholmestrandgcahcesuoloans3-fips-us-gov-west-1xn--for" + - "lcesena-c8axn--fpcrj9c3dxn--frde-grandrapidspreadbettingxn--frna" + - "-woaraisaijosoyrorospydebergxn--frya-hraxn--fzc2c9e2clintonoshoe" + - "santamariakexn--fzys8d69uvgmailxn--g2xx48clothingdustdataiwanair" + - "forcebetsuikidsmynasushiobaragusabaejrietisalatinabenonicbcn-nor" + - "th-1xn--gckr3f0fbx-ostrowwlkpmgruexn--gecrj9cn-northwest-1xn--gg" + - "aviika-8ya47hakatanortonxn--gildeskl-g0axn--givuotna-8yasugivest" + - "bytemarkonyvelolipoppdalxn--gjvik-wuaxn--gk3at1exn--gls-elacaixa" + - "xn--gmq050is-into-gamessinazawaxn--gmqw5axn--h-2failxn--h1aeghak" + - "odatexn--h2breg3evenesrlxn--h2brj9c8cngriwataraidyndns-workshopi" + - "tsitevadsobetsumidatlantichitachinakagawashtenawdev-myqnapcloude" + - "itysfjordyndns-blogdnsamsclubartowfarmsteadyndns-freeboxosloftoy" + - "osatoyokawaxn--h3cuzk1discountyxn--hbmer-xqaxn--hcesuolo-7ya35ba" + - "silicataniautoscanadaeguambulancechirealmpmnavuotnapleskns3-eu-w" + - "est-2xn--hery-iraxn--hgebostad-g3axn--hkkinen-5waxn--hmmrfeasta-" + - "s4accident-prevention-rancherkasydneyxn--hnefoss-q1axn--hobl-ira" + - "xn--holtlen-hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hyland" + - "et-54axn--i1b6b1a6a2exn--imr513nxn--indery-fyasuokanoyaltakatori" + - "s-leetrentino-stirolxn--io0a7is-lostrodawaraxn--j1aefbxosavannah" + - "gaxn--j1amhakonexn--j6w193gxn--jlq480n2rgxn--jlq61u9w7basketball" + - "finanzgoraveroykengerdalces3-eu-west-3xn--jlster-byatominamidait" + - "omanchesterxn--jrpeland-54axn--jvr189misakis-a-therapistoiaxn--k" + - "7yn95exn--karmy-yuaxn--kbrq7oxn--kcrx77d1x4axn--kfjord-iuaxn--kl" + - "bu-woaxn--klt787dxn--kltp7dxn--kltx9axn--klty5xn--3oq18vl8pn36ax" + - "n--koluokta-7ya57hakubahcavuotnagaivuotnagaokakyotambabyenglandx" + - "n--kprw13dxn--kpry57dxn--kput3is-not-certifiedugit-pagespeedmobi" + - "lizeroticanonoichinomiyakexn--krager-gyatsukanraxn--kranghke-b0a" + - "xn--krdsherad-m8axn--krehamn-dxaxn--krjohka-hwab49jdevcloudnshir" + - "ahamatonbetsurnadalxn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-fyatsu" + - "shiroxn--kvnangen-k0axn--l-1fairwindsrvarggatrentinsued-tirolxn-" + - "-l1accentureklamborghinikolaeventstoregontrailroadxn--laheadju-7" + - "yawaraxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika-52" + - "batochiokinoshimaizuruhrhcloudiscoveryomitanobninskaracoldwarsza" + - "wavocatanzarowebspacebizenakanojohanamakinoharaukraanghkeymachin" + - "eustargardds3-ca-central-1xn--lesund-huaxn--lgbbat1ad8jdfastvps-" + - "serveronakanotoddenxn--lgrd-poacctrvaroyxn--lhppi-xqaxn--linds-p" + - "ramericanartrycloudflareplantationxn--lns-qlaquilanstorfjordxn--" + - "loabt-0qaxn--lrdal-sraxn--lrenskog-54axn--lt-liacnpyatigorskodje" + - "ffersonxn--lten-granexn--lury-iraxn--m3ch0j3axn--mely-iraxn--mer" + - "ker-kuaxn--mgb2ddestorjcphonefosshioyandexcloudxn--mgb9awbfedora" + - "peoplegnicapebretonamicrosoftbankasukabedzin-berlindasdaburxn--m" + - "gba3a3ejtrysiljanxn--mgba3a4f16axn--mgba3a4franamizuholdingstpet" + - "ersburgxn--mgba7c0bbn0axn--mgbaakc7dvfedoraprojectraniandriabarl" + - "ettatraniandriaxn--mgbaam7a8hakuis-a-gurustkannamilanotogawaxn--" + - "mgbab2bdxn--mgbah1a3hjkrdxn--mgbai9a5eva00batsfjordishakotanayor" + - "ovigovtaxihuanfshostrolekamishihoronobeauxartsandcrafts3-website" + - "-ap-northeast-1xn--mgbai9azgqp6jelasticbeanstalkddietnedalxn--mg" + - "bayh7gpaleoxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgberp" + - "4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--mgbp" + - "l2fhskydivingxn--mgbqly7c0a67fbcnsantoandreamhostersanukis-a-cel" + - 
"ticsfanxn--mgbqly7cvafranziskanerimaringatlantakahashimamakiryuo" + - "hdattorelayxn--mgbt3dhdxn--mgbtf8flatangerxn--mgbtx2bauhausposts" + - "-and-telecommunications3-website-ap-southeast-1xn--mgbx4cd0abbvi" + - "eeexn--mix082feiraquarelleaseeklogesaveincloudynvpnplus-4xn--mix" + - "891fermochizukirovogradoyxn--mjndalen-64axn--mk0axin-dslgbtuneso" + - "r-odalxn--mk1bu44cntoystre-slidrettozawaxn--mkru45is-savedunetfl" + - "ixilxn--mlatvuopmi-s4axn--mli-tlarvikooris-a-nursembokukitchenxn" + - "--mlselv-iuaxn--moreke-juaxn--mori-qsakuragawaxn--mosjen-eyawata" + - "hamaxn--mot-tlavagiskexn--mre-og-romsdal-qqbuserveexchangexn--ms" + - "y-ula0hakusanagochijiwadell-ogliastraderxn--mtta-vrjjat-k7aflaks" + - "tadaokagakicks-assnasaarlandxn--muost-0qaxn--mxtq1misasaguris-an" + - "-accountantshinshiroxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--3pxu8kom" + - "vuxn--32vp30haebaruericssongdalenviknakatsugawaxn--nit225kopervi" + - "khakassiaxn--nmesjevuemie-tcbalsan-sudtirollagdenesnaaseinet-fre" + - "akstreamswatch-and-clockerxn--nnx388axn--nodessakurais-slickazun" + - "ow-dnshiojirishirifujiedaxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iq" + - "x3axn--ntsq17gxn--nttery-byaeservehttplantslzxn--nvuotna-hwaxn--" + - "nyqy26axn--o1acheltenham-radio-opencraftrainingxn--o3cw4haldenxn" + - "--o3cyx2axn--od0algorithmiasakuchinotsuchiurakawaxn--od0aq3benev" + - "entoeidskoguchikuzenhktcp4xn--ogbpf8flekkefjordxn--oppegrd-ixaxn" + - "--ostery-fyaxn--osyro-wuaxn--otu796dxn--p1acferraraxn--p1ais-ube" + - "rleetrentino-sud-tirolxn--pgbs0dhlxn--porsgu-sta26ferraris-a-cub" + - "icle-slavellinodeobjectsaves-the-whalessandria-trani-barletta-an" + - "driatranibarlettaandriaxn--pssu33lxn--pssy2uxn--q9jyb4collection" + - "xn--qcka1pmcdirxn--qqqt11misawaxn--qxa6axn--qxamuneuestudioxn--r" + - "ady-iraxn--rdal-poaxn--rde-ulavangenxn--rdy-0nabaris-very-badajo" + - "zxn--rennesy-v1axn--rhkkervju-01aferrerotikagoshimalvikasumigaur" + - "ayasudaxn--rholt-mragowoodsidemonmouthalsaitamatsukuris-a-hard-w" + - "orkersewilliamhillxn--rhqv96gxn--rht27zxn--rht3dxn--rht61exn--ri" + - "sa-5nativeamericanantiquestudynamisches-dnsolognexn--risr-iraxn-" + - "-rland-uuaxn--rlingen-mxaxn--rmskog-byaxn--rny31hammarfeastafric" + - "apitalonewmexicodyn-o-saurlandesharis-a-hunterxn--rovu88bentleyo" + - "nagoyavoues3-external-1xn--rros-granvindafjordxn--rskog-uuaxn--r" + - "st-0naturalhistorymuseumcenterxn--rsta-francaiseharaxn--rvc1e0am" + - "3exn--ryken-vuaxn--ryrvik-byaxn--s-1faithamurakamigoris-a-knight" + - "pointtohobby-sitexn--s9brj9colognewyorkshirecifedexeterxn--sandn" + - "essjen-ogbeppublishproxyzgorzeleccogjerstadotsuruokakamigaharaxa" + - "urskog-holandinggfarmerseine164-baltimore-og-romsdalipayboltates" + - "hinanomachimkentateyamaetnaamesjevuemielno-ipifonyaarpalmasfjord" + - "enaturhistorisches3-ap-southeast-1xn--sandy-yuaxn--sdtirol-n2axn" + - "--seral-lraxn--ses554gxn--sgne-graphoxn--42c2d9axn--skierv-utaza" + - "stuff-4-salexn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn--sknland-" + - "fxaxn--slat-5naturalsciencesnaturellestufftoread-booksnesolundbe" + - "ckomakiyosunndalxn--slt-elabcieszynxn--smla-hraxn--smna-gratange" + - "ntlentapisa-geekoryokamikawanehonbetsurutaharaxn--snase-nraxn--s" + - "ndre-land-0cbeskidyn-ip24xn--snes-poaxn--snsa-roaxn--sr-aurdal-l" + - "8axn--sr-fron-q1axn--sr-odal-q1axn--sr-varanger-ggbestbuyshouses" + - "3-website-ap-southeast-2xn--srfold-byaxn--srreisa-q1axn--srum-gr" + - "atis-a-bulls-fanxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-" + - "sqbetainaboxfusejnymemergencyahabaghdadiskussionsbereichaseljeep" + - 
"sondriodejaneirockartuzyonagunicommbankaragandaxn--stre-toten-zc" + - "bhzcasertairaumalborkarasjohkamikitayamatsurin-the-bandain-vpnca" + - "sinordkappalmspringsakerxn--t60b56axn--tckweatherchannelxn--tiq4" + - "9xqyjelenia-goraxn--tjme-hraxn--tn0agrinetbankosaigawaxn--tnsber" + - "g-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tirol-rzbieidsvollim" + - "anowarudaxn--trentin-sdtirol-7vbrplsbxn--45br5cylxn--trentino-sd" + - "-tirol-c3bielawaltervistaipeigersundisrechtranakaiwamizawatchand" + - "clockarasjokarasuyamarshallstatebankarateu-3xn--trentino-sdtirol" + - "-szbiellaakesvuemielecceu-4xn--trentinosd-tirol-rzbieszczadygeya" + - "chiyodaejeonbukcoalvdalaheadjudygarlandivtasvuodnakamagayahikobi" + - "erzycevje-og-hornnes3-website-eu-west-1xn--trentinosdtirol-7vbie" + - "vat-band-campaniaxn--trentinsd-tirol-6vbifukagawashingtondclkara" + - "tsuginamikatagamilitaryoriikareliancextraspace-to-rentalstomakom" + - "aibaraxn--trentinsdtirol-nsbigv-infoodnetworkangerxn--trgstad-r1" + - "axn--trna-woaxn--troms-zuaxn--tysvr-vraxn--uc0atvestfoldxn--uc0a" + - "y4axn--uist22handsonyoursidellogliastradingxn--uisz3gxn--unjrga-" + - "rtashkentunkommunexn--unup4yxn--uuwu58axn--vads-jraxn--valle-aos" + - "te-ebbturystykanmakiwielunnerxn--valle-d-aoste-ehbodollstuttgart" + - "rentinsuedtirolxn--valleaoste-e7axn--valledaoste-ebbvacationsusa" + - "kis-gonexn--vard-jraxn--vegrshei-c0axn--vermgensberater-ctbihoro" + - "logyoshiokanzakiyokawaraxn--vermgensberatung-pwbikedaemoneyukinc" + - "heonhlfanhs3-website-sa-east-1xn--vestvgy-ixa6oxn--vg-yiabkhazia" + - "xn--vgan-qoaxn--vgsy-qoa0jeonnamerikawauexn--vgu402colonialwilli" + - "amsburgroks-thisayamanobeokakudamatsuexn--vhquvestnesorfoldxn--v" + - "ler-qoaxn--vre-eiker-k8axn--vrggt-xqadxn--vry-yla5gxn--vuq861bil" + - "baokinawashirosatochigiessensiositecnologiaxn--w4r85el8fhu5dnrax" + - "n--w4rs40lxn--wcvs22dxn--wgbh1coloradoplateaudioxn--wgbl6axn--xh" + - "q521billustrationredumbrellahppiacenzachpomorskienikonanporovnob" + - "serverxn--xkc2al3hye2axn--xkc2dl3a5ee0hangglidingxn--y9a3aquariu" + - "misconfusedxn--yer-znaturbruksgymnxn--yfro4i67oxn--ygarden-p1axn" + - "--ygbi2ammxn--45brj9civilizationiyodogawaxn--ystre-slidre-ujbioc" + - "eanographics3-website-us-east-1xn--zbx025dxn--zf0ao64axn--zf0avx" + - "lxn--zfr164birdartcenterprisecloudcontrolappleborkdalwaysdatabas" + - "eballangenkainanaerobatickets3-website-us-west-1xnbayxz" + "trosnubalsfjordd-dnshome-webserverdal-o-g-i-n4tatsunobihirosakik" + + "amijimatsuuragrocerybnikeisenbahnaturhistorisches3-ap-south-1bip" + + "almasfjordenikonanporovnocpalmspringsakerbirdartcenterprisecloud" + + "accesscambridgeiseiyoichippubetsubetsugarussiabirkenesoddtangeno" + + "varahkkeravjuegoshikilatironrenderbirthplacevje-og-hornnes3-webs" + + "ite-us-west-1bjarkoyukuhashimojin-the-bandain-vpncateringebuildi" + + "ngladegreextraspace-to-rentalstomakomaibarabjerkreimbamblebesbyg" + + "landroverhalla-speziaustevollaziobiramswatch-and-clockereviewsai" + + "toshimattelekommunikationatuurwetenschappengine164-baltimore-og-" + + "romsdalp1bjugnieznord-odalwaysdatabaseballangenkainanaejrietisal" + + "atinabenonicatholicaxiaskimitsubatamibugattiffanyaaarborteaches-" + + "yogasawara-rugbydgoszczecinemaceratabuseating-organicbcieszynino" + + "hekinannestadiyurihonjournalistjohninomiyakonojorpelandnpanamats" + + "uzakincheonirasakindianapolis-a-bloggerblackfridayusuharabloombe" + + "rgbauernishiazaindianmarketinglassassinationalheritagebloxcms3-w" + + "ebsite-us-west-2bluedagestangemologicallyngenishigoddabmoattachm" + + 
"ents5yusuisservehttpanasonichernivtsiciliabmsakyotanabellunord-f" + + "rontierbmwedeployuulmemsettlersalangenishiharabnrwegroweibologna" + + "gareyamakeupowiatmallorcafederation-webhopencraftrainingleezebom" + + "loabathsbchernovtsyncloudrangedalondrinamsskoganeindielddanuorri" + + "ndigenaklodzkodairabondigitaloceanographicsxboxenishiizunazukind" + + "owapblogsiteleafamilycompany-2bonnishikataketomisatomobellevuelo" + + "sangelesjabbottjeldsundray-dnstracebookinghosted-by-previderboom" + + "lair-traffic-controlleyuzawaboschaefflerdalorenskoglitcheltenham" + + "-radio-opensocialottebostikariyameiwamarugame-hostedpictetjmaxxx" + + "finitybostonakijinsekikogentappsalon-1botanicalgardenishikatsura" + + "git-reposaltdalottokonamegatakayamassa-carrara-massacarraramassa" + + "businessebykleclerchirurgiens-dentistes-en-francebotanicgardenis" + + "hikawazukamishihoronobeauxartsandcraftsaludrayddnsfreebox-osasco" + + "li-picenordlandraydnsupdaterbotanychiryukyuragifuchungbukharauma" + + "lborkarlsoybouncemerckmsdnipropetrovskjervoyageorgeorgiabounty-f" + + "ullensakerrypropertiesalvadordalibabalena-devicesalzburgliwicebo" + + "utiquebechitachinakagawatchandclockarmoybozen-sudtirolouvrehabme" + + "rbozen-suedtirolowiczest-le-patronishimerabplaceducatorahimeshim" + + "amateraholtalenishinomiyashironohtawaramotoineppueblockbusternii" + + "minamiawajikindustriabrandywinevalleybrasiliabrindisibenikimobet" + + "suitaipeigersundrivefsnillfjordrobaknoluoktachikawafflecellcube-" + + "serverbristoloseyouriparachutinglobalashovhachinohedmarkarpaczel" + + "adzlgloboavistanbulsan-sudtirolpusercontentjomeloyalistoragebrit" + + "ishcolumbialowiezaganishinoomotegomniweatherchannelubindalublind" + + "esnesamegawabroadcastlebtimnetzparaglidinglogoweirbroadwaybroke-" + + "itvedestrandrudupontariobranconakaniikawatanagurabrokerbronnoysu" + + "ndurbanamexhibitionishinoshimatsushigebrothermesaverdeatnulvikar" + + "uizawabrowsersafetymarketsamnangerbrumunddalucaniabrunelastxjava" + + "ld-aostarnbergloppenzaolbia-tempio-olbiatempioolbialystokkembuch" + + "ikumagayagawakayamagentositecnologiabrusselsampalacebruxellesams" + + "clubartowellbeingzonebryansklepparisor-fronishiokoppegardurhambu" + + "rglugsjcbnpparibaselburgmbhartipsselfiparliamentjxn--0trq7p7nnis" + + "hitosashimizunaminamibosogndaluccargodaddyn-o-saurlandesamsungmi" + + "nakamichiharabrynewhollandynathomebuiltwithdarkarumaifarmsteadyn" + + "dns-at-homedepotenzamamidsundyndns-at-workisboringmodellingmxn--" + + "11b4c3dyndns-blogdnsandnessjoenishiwakindustriesteamfamberkeleyb" + + "uskerudyndns-freeboxoslocus-4buzentsujiiebuzzwesteuropenairbusan" + + "tiquest-a-la-maisondre-landroidyndns-homednsandoybwestfalenissan" + + "diegomurabzhitomirumalatvuopmicrolightingretakamoriokakudamatsue" + + "bzzcompute-1computerhistoryofscience-fictioncomsecaaskoyabearalv" + + "ahkijobservableusercontentoyotsukaidocondoshichinohealth-careref" + + "ormitakeharaconferenceconstructionconsuladoesntexisteingeekashiw" + + "araconsultanthropologyconsultingrongausdalcontactoyouracontagema" + + "tsubaracontemporaryarteducationalchikugodogadollsapporocontracto" + + "rskenconventureshinodeartheworkpccwhoswhokksundyndns1cookingchan" + + "nelsdvrdnsdojoburgrossetouchihayaakasakawaharacoolcooperativano-" + + "frankivskolefrakkestadynnsardegnaroycopenhagencyclopedichonanbul" + + "san-suedtirolukowestus2coproductionsardiniacorporationcorsicanon" + + "oichinomiyakecorvettemp-dnsarlcosenzakopanelastycoffeedbackplane" + + "applinzinzais-a-candidatecosidnsfor-better-thanawatchesarpsborgr" + + 
"oundhandlingroznynysaintlouis-a-anarchistoireggio-emilia-romagna" + + "katombetsumitakagiizecostumedicinagatorodoycouchpotatofriesarufu" + + "tsunomiyawakasaikaitabashikaoizumizakis-a-caterercoukashiwazakiy" + + "okawaracouncilcouponsasayamayfirstockholmestrandynservebbsasebof" + + "ageologycozoracqcxn--12co0c3b4evalleaostavangercranbrookuwanalyt" + + "icsaskatchewancrdynuniversitycreditcardynv6creditunioncremonashg" + + "abadaddjaguarqhachiojiyaizuwakamatsubushikusakadogawacrewiencric" + + "ketrzyncrimeast-kazakhstanangercrotonexus-3crownipartsassaris-a-" + + "celticsfancrsvps-hostrolekagoshimalopolskanlandynvpnpluscountrye" + + "stateofdelawareclaimsaudacruisesauheradyroyrvikingrpartycryptono" + + "michigangwoncuisinellajollamericanexpressexyculturalcentertainme" + + "ntoystre-slidrettozawacuneocupcakecuritibaghdadcurvalled-aostave" + + "rncymrunjargacyonabarumetacentrumeteorappasadenarashinocyouthruh" + + "erecifedexeterferrarivneferrerotikakamigaharafetsundfguidell-ogl" + + "iastraderfhskydivinguitarsavonarusawafhvalerfidontexistmein-iser" + + "vebeerfieldfigueresinstagingujoinvilleirvikasserversaillesaxofil" + + "ateliafilegear-audnedalnfilegear-debianfilegear-gbizfilegear-ief" + + "ilegear-jpmorganfilegear-sg-1filminamifuranofinalfinancefinearts" + + "choenbrunnfinlandfinnoyfirebaseappassagenschokokekschokoladenfir" + + "enetrani-andria-barletta-trani-andriafirenzefirestonefirmdalegni" + + "capetownnews-stagingulenfishingoldpoint2thisamitsukefitjarvodkaf" + + "jordvagsoygardenflfanquanpachigasakievennodesabaerobaticketschol" + + "arshipschoolsztynsettsurgeonshalloffameldalfitnessettlementrania" + + "ndriabarlettatraniandriafjalerflesbergunmansionschulezajskasukab" + + "edzin-berlindasdaburflickragerogerschulserverflightschwarzgwangj" + + "uifminamiiserniaflirfloginlinefloraflorencefloridatsunanjoetsuwa" + + "nouchikujogaszkolancashirecipescaravantaarpassenger-associationf" + + "loripaderbornfloristanohatajiris-a-chefashionflorokunohealthcare" + + "erschweizflowersciencecentersciencehistoryfltranoyflynnhosting-c" + + "lusterfndfnwkasumigaurayasudafoodnetworkdalfor-ourfor-somedizinh" + + "istorischescientistordalfor-theaterforexrothachirogatakanabeauty" + + "sfjordforgotdnscjohnsonforli-cesena-forlicesenaforlikescandyn53f" + + "orsalegolffanscrapper-siteforsandasuoloftranslatefortalfortextil" + + "eikangerfortmissoulancasterfortworthadanorth-kazakhstanfosnescra" + + "ppinguovdageaidnunusualpersonfotaruis-a-conservativegarsheis-a-c" + + "padualstackasuyanaizuerichardlillesandefjordfoxafozfrancaisehara" + + "franziskanerimaringatlantakahamalvikaszubyfredrikstadtvscrysecur" + + "itytacticservehumourfreeddnsgeekgalaxyfreedesktopocznordreisa-ho" + + "ckeynutazurestaticappspotagerfreemasonryfreesitefreetlserveircho" + + "shibuyahabackyardsangomutashinainfinitintelligencefreiburgushika" + + "mifuranorfolkebibleitungsenfreseniusculturecreationfribourgwiddl" + + "eksvikatowicefriuli-v-giuliafriuli-ve-giuliafriuli-vegiuliafriul" + + "i-venezia-giuliafriuli-veneziagiuliafriuli-vgiuliafriuliv-giulia" + + "friulive-giuliafriulivegiuliafriulivenezia-giuliafriuliveneziagi" + + "uliafriulivgiuliafrlfroganserveminecraftransportefrognfrolandfro" + + "m-akrehamnfrom-alfrom-arfrom-azurewebsiteshikagamiishibukawalbrz" + + "ycharternopilawalesundfrom-capitalonewjerseyfrom-cogxn--1ctwolom" + + "inamatargitlaborfrom-ctransurlfrom-dchoyodobashichikashukujitawa" + + "ravennagasakinderoyfrom-dedyn-berlincolnfrom-flanderservemp3from" + + "-gaulardalfrom-hichisochildrensgardenfrom-iafrom-idfrom-ilfrom-i" + + 
"n-brbar1from-kservep2patriafrom-kyowariasahikawafrom-lanciafrom-" + + "mamurogawafrom-mdfrom-meeresistancefrom-mifunefrom-mnfrom-modale" + + "nfrom-mservepicservequakefrom-mtnfrom-nctulangevagrigentomologye" + + "onggiehtavuoatnabudapest-a-la-masion-rancherkasydneyfrom-ndfrom-" + + "nefrom-nh-serveblogspotrapaniizafrom-njservesarcasmatartanddesig" + + "nfrom-nminamiizukaminoyamaxunispacefrom-nvalledaostaobaomoriguch" + + "iharag-cloud-charitychyattorneyagawakepnogatagajobojis-a-cubicle" + + "-slavellinodeobjectservicesevastopolefrom-nyminamimakis-a-democr" + + "atravelchannelfrom-ohdattorelayfrom-oketogurafrom-orfrom-padovak" + + "sdalfrom-pratohmandalfrom-ris-a-designerfrom-schmidtre-gauldalfr" + + "om-sdfrom-tnfrom-txn--1lqs03nfrom-utsiracusagamiharafrom-val-dao" + + "stavalleyfrom-vtravelersinsurancefrom-wafrom-wiardwebredirectmee" + + "trdfrom-wvallee-aosteroyfrom-wyfrosinonefrostalowa-wolawafroyait" + + "akaharunzenfstcgroupaviancarrierfujiiderafujikawaguchikonefujimi" + + "nokamoenairguardiannakadomarinebraskaunicommbankatsushikabeelden" + + "geluidvallee-d-aosteigenfujinomiyadattowebcampinashikiminohostfo" + + "ldnavyfujiokayamangonohejis-a-doctorayfujisatoshonairlinedre-eik" + + "erfujisawafujishiroishidakabiratoridefenseljordfujitsurugashiman" + + "gyshlakasamatsudoomdnsiskinkyotobetsumidatlantichristiansburgrim" + + "stadyndns-mailutskashibatakatorinternationalfirearmsanjotlon-2fu" + + "jixeroxfordefinimakanegasakinkobierzycefujiyoshidavvenjargap-nor" + + "theast-3fukayabeatsevenassisicilyfukuchiyamadavvesiidappnodebala" + + "ncertificationfukudomigawafukuis-a-financialadvisor-aurdalfukumi" + + "tsubishigakirovogradoyfukuokazakiryuohkurafukuroishikarikaturind" + + "alfukusakisarazure-mobileirfjordfukuyamagatakahashimamakishiwada" + + "zaifudaigojomedio-campidano-mediocampidanomediofunabashiriuchina" + + "dafunagatakahatakaishimoichinosekigaharafunahashikamiamakusatsum" + + "asendaisennangooglecodespotrendhostingfundaciofunkfeuerfuoiskuju" + + "kuriyamaniwakuratefuosskoczowiiheyakumoduminamiminowafurnituregg" + + "io-calabriafurubirafurudonostiaafurukawairportland-4-salernobori" + + "betsucksharis-a-geekatsuyamarumorimachidafusodegaurafussaikisofu" + + "kushimannore-og-uvdalfutabayamaguchinomihachimanagementrentin-su" + + "d-tirolfutboldlygoingnowhere-for-morenakasatsunairtelebitbridges" + + "toneendoftheinternethnologyfuttsurugimperiafuturecmsharpfizerfut" + + "urehostingfuturemailingfvgfyresdalhangglidinghangoutsystemscloud" + + "hannanmokuizumodenakayamanxn--1lqs71dhannortonhanyuzenhapmirclou" + + "dplatform0harstadharvestcelebrationhasamaoris-a-hunterhasaminami" + + "-alpshimokawahashbanghasudahasura-appgfoggiahasvikautokeinotogaw" + + "ahatoyamazakitahiroshimapartmentshimokitayamahatsukaichikaiseihe" + + "ijis-a-knightpointtohobby-sitehattfjelldalhayashimamotobungotaka" + + "dancehazuminobusells-for-ustkanmakiwakunigamiharutwentehelsinkit" + + "akamiizumisanofidelitysvardonnakamuratajimidorittogliattis-a-lan" + + "dscaperugiahembygdsforbundhemneshimonitayanagitappharmacienshimo" + + "nosekikawahemsedalhepforgeherokussldheroyhgtvalleeaosteinkjerusa" + + "lembroideryhidorahigashiagatsumagoianiahigashichichibunkyonanaos" + + "himageandsoundandvisionthewifiatrentin-sued-tirolhigashihiroshim" + + "anehigashiizumozakitakatakaokaluganskygearappharmacyshimosuwalki" + + "s-a-lawyerhigashikagawahigashikagurasoedahigashikawakitaaikitaky" + + "ushuaiahigashikurumegurownproviderhigashimatsushimarburghigashim" + + "atsuyamakitaakitadaitoigawahigashimurayamamotorcycleshimotsukehi" + + 
"gashinarusells-itrentin-suedtirolhigashinehigashiomitamamurausuk" + + "itamihamadahigashiosakasayamanakakogawahigashishirakawamatakaraz" + + "ukamakurazakitamotosumy-gatewayhigashisumiyoshikawaminamiaikitan" + + "akagusukumodernhigashitsunosegawahigashiurawa-mazowszexnetrentin" + + "o-a-adigehigashiyamatokoriyamanashiibahccavuotnagaragusadocktera" + + "mo-siemenscaledogawarabikomaezakirunoipirangalsacentralus-2higas" + + "hiyodogawahigashiyoshinogaris-a-liberalhiraizumisatohnoshoooshik" + + "amaishimofusartshimotsumahirakatashinagawahiranairtrafficplexus-" + + "1hirarahiratsukaeruhirayakagehistorichouseshinichinanhitachiomiy" + + "agildeskaliszhitachiotagoppdalhitraeumtgeradeloittenrissagaerocl" + + "ubmedecincinnationwidealstahaugesunderseaportsinfolionetworkange" + + "rhjartdalhjelmelandholeckochikushinonsenergyholidayhomegoodshinj" + + "ournalismailillehammerfeste-iphdfcbankazoologyhomeiphiladelphiaa" + + "readmyblogsytehomelinkyard-cloudnshinjukumanowruzhgorodeohomelin" + + "uxn--1qqw23ahomeofficehomesecuritymacaparecidahomesecuritypchris" + + "tmaseratiresannanisshingucciprianidyndns-office-on-the-weberhome" + + "senseeringhomeunixn--2m4a15ehondahongotembaixadahonjyoitakasagot" + + "pantheonsitehornindalhorsellsyourhomeftphilatelyhortendofinterne" + + "t-dnshinkamigototalhospitalhoteleshinshinotsurgeryhotmailhoyange" + + "rhoylandetroitskypehumanitieshinshirohurdalhurumajis-a-libertari" + + "anhyllestadhyogoris-a-linux-usershintokushimahyugawarahyundaiwaf" + + "uneis-very-badajozis-a-nursembokukitchenis-very-evillageis-very-" + + "goodyearis-very-niceis-very-sweetpepperis-with-thebandovre-eiker" + + "isleofmanaustdaljenv-arubabizjeonnamerikawauejetztrentino-stirol" + + "jevnakershusdecorativeartshiranukamitondabayashiogamagoriziajewe" + + "lryjewishartgalleryjfkddiamondshiraois-a-painterhostsolutionshin" + + "tomikasaharajgorajlljls-sto1jls-sto2jls-sto3jmphonefosshiraokami" + + "tsuejnjaworznotairestaurantrentino-s-tiroljoyentrentino-sud-tiro" + + "ljoyokaichibajddarchitecturealtorlandjpnjprshiratakahagithubuser" + + "contentrentino-sudtiroljurkosaigawakosakaerodromegallupinbarclay" + + "cards3-sa-east-1koseis-a-photographerokuapphilipsynology-disksta" + + "tionkosherbrookegawakoshimizumakiyosemitekoshunantankhakassiakos" + + "ugekotohiradomainsureggioemiliaromagnamsosnowiechurchaseljedugit" + + "-pagespeedmobilizeroticahcesuoloansanokashiharakotourakouhokutam" + + "akiyosunndalkounosupplieshitaramakouyamashikekouzushimashikizuno" + + "kunimilitarykozagawakozakis-a-playershifteditchyouriphoenixn--2s" + + "crj9chromedicaltanissettaishinomakindlecznagasukekozowildlifesty" + + "lekpnkppspdnshizukuishimogosenkrasnikahokutokashikis-a-republica" + + "ncerresearchaeologicaliforniakrasnodarkredstonekristiansandcatsh" + + "izuokamogawakristiansundkrodsheradkrokstadelvaldaostarostwodzisl" + + "awilliamhillkryminamioguni5kumatorinowtvaporcloudkumejimasoykume" + + "nantokigawakunisakis-a-rockstarachowicekunitachiarailwaykunitomi" + + "gusukumamotoyamashikokuchuokunneppubtlshoppingkunstsammlungkunst" + + "unddesignkuokgrouphxn--32vp30haebaruericssongdalenviknakatsugawa" + + "kuregruhostingkurgankurobelaudibleasingleshopwarendalenugkurogim" + + "imatakatsukis-a-socialistdlibestadkuroisoftwarezzokuromatsunais-" + + "a-soxfankurotakikawasakis-a-studentalkushirogawakustanais-a-teac" + + "herkassyno-dshinyoshitomiokamisunagawakusupplynxn--3bst00minamis" + + "anrikubetsupportrentino-sued-tirolkutchanelveruminamitanekutnoku" + + "zumakis-a-techietis-a-llamarnardalkvafjordkvalsundkvamlidlugolek" + + 
"adenagahamaroyerkvanangenkvinesdalkvinnheradkviteseidatingkvitso" + + "ykwpspectruminamiuonumassivegridkzmisconfusedmishimasudamissilel" + + "uxembourgmisugitokorozawamitourismilevangermitoyoakemiuramiyazur" + + "econtainerdpolicemiyotamanomjondalenmlbfanmontrealestatefarmequi" + + "pmentrentino-suedtirolmonza-brianzapposhoujis-an-actresshioyande" + + "xcloudmonza-e-della-brianzaptokuyamatsumaebashimodatemonzabrianz" + + "aramonzaebrianzamonzaedellabrianzamoonscaleforcemordoviamoriyama" + + "tsumotofukemoriyoshiminamiashigaramormonstermoroyamatsunomortgag" + + "emoscowinbarclays3-us-east-2moseushistorymosjoenmoskeneshowamoss" + + "howtimelhusgardenmosvikharkovanylvenicemoteginowaniigatakamatsuk" + + "awamoviemovimientokyotangotsukisosakitagawamozilla-iotrentinoa-a" + + "digemtranbymuginozawaonsenmuikamiokameokameyamatotakadamukoebenh" + + "avnmulhouseoullensvanguardmunakatanemuncienciamuosattemupiemonte" + + "murmanskhersonmurotorcraftrentinoaadigemusashimurayamatsusakahog" + + "inankokubunjis-an-anarchistoricalsocietymusashinoharamuseetrenti" + + "noalto-adigemuseumverenigingmusicarrdmutsuzawamy-vigorgemy-wangg" + + "ouvicircustomer-ocimdbananarepublic66myactivedirectorymyasustor-" + + "elvdalmycdn77-sslattuminamiyamashirokawanabelembetsukubankharkiv" + + "alleedaostemycloudswitcheshwindmillmydattolocalhistorymyddnsking" + + "mydissentrentinoaltoadigemydobisshikis-an-artistgorymydroboehrin" + + "gerikemydsienarutolgamyeffectrentinos-tirolmyfastblogermyfirewal" + + "lonieruchomoscienceandindustrynmyforuminanomyfritzmyftpaccessigd" + + "almyhome-servermyjinomykolaivareservegame-servermymailermymediap" + + "cistrondheimmobilieniyodogawamyokohamamatsudamypepilotsilkhmelni" + + "tskiyamarylandmypetsimple-urlmyphotoshibalatinombresciamypicture" + + "sirdalmypsxn--3ds443gmysecuritycamerakermyshopblockslupskhmelnyt" + + "skyivaomythic-beastslzmytis-a-bookkeeperspectakashimaritimoldelt" + + "aiwanairforcebetsuikidsmynasushiobarackmazerbaijan-mayen-rootari" + + "beiraogashimadachicagoboatsmolapymntrentinostirolmytuleaprendema" + + "sakihokumakogenebakkeshibechambagriculturennebudejjuedischesapea" + + "kebayernrtrentinosud-tirolmyvncitadeliverydyndns-remotewdyndns-s" + + "erverisignmywireitrentinosudtirolpklabudhabikinokawabarthadselec" + + "trentin-sudtirolplantsnoasakakinokiaplatformshangrilanxessokanag" + + "awaplatter-appimientakinoueplatterpinkhplaystation-cloudplazaplc" + + "itichocolatelevisionissayokkaichiropractichitosetogakushimotogan" + + "ewportkmaxxn--12c1fe0bradescotlandyndns-iparmatta-varjjatksatxn-" + + "-12cfi8ixb8lucerneplumbingoplurinacionalpodhaleviracloudletsoknd" + + "alpodlasiellaktyubinskiptveterinaireadthedocscappgafannefrankfur" + + "trentinosued-tirolpodzonepohlpoivronpokerpokrovskmpspbar2politic" + + "artoonartdecologiapolitiendapolkowicepoltavalle-aostathellewismi" + + "llerpomorzeszowindowskrakowinnersolarssonponpesaro-urbino-pesaro" + + "urbinopesaromasvuotnaritakoelnponypordenonepornporsangerporsangu" + + "geporsgrunnanyokoshibahikariwanumatakkofuefukihaboromskogpoznanp" + + "raxis-a-bruinsfanprdpreservationpresidioprgmrprimetelemarknx-ser" + + "versicherungprincipeprivatizehealthinsuranceprofesionalprogressi" + + "venneslaskerrylogisticsolognepromombetsurfastvps-serveronakanoto" + + "ddenpropertyprotectionprotonetrentinosuedtirolprudentialpruszkow" + + "iosolundbeckomaganeprvcyberlevagangaviikanonjis-an-engineeringpr" + + "zeworskogpulawypupioneerpvhagakhanamigawapvtrentinsud-tirolpwciv" + + "ilaviationpzqldqotoyohashimotoolsomaqponiatowadaqslingqualifioap" + + 
"pippugliaquickconnectrentinsudtirolquicksytestingquipelementsomn" + + "arviikamisatokaizukamikitayamatsuris-an-entertainerqvcivilisatio" + + "nsveiosvelvikomforbarcelonagawalmartattoolforgebinagisoccertmgra" + + "zimuthatogayachimataiji234lima-cityeatselinogradultateshinanomac" + + "himkentateyamaetnaamesjevuemielno-ipifony-1svizzerasvn-reposor-v" + + "arangerswidnicasadelamonedapliernewmexicodyn-vpndnsorfoldswidnik" + + "kokonoeswiebodzin-butterswiftcoverswinoujscienceandhistoryswissm" + + "arterthanyousynology-dsorocabalestrandabergamoareketunkommunalfo" + + "rbundturystykaniepcetuscanytushuissier-justicetuvalle-daostatics" + + "oundcastronomy-routertuxfamilytwmailvestre-slidreplantationvestr" + + "e-totennishiawakuravestvagoyvevelstadvibo-valentiavibovalentiavi" + + "deovillasouthwest1-uslivinghistoryvinnicaseihicampobassociatest-" + + "iservecounterstrikevinnytsiavipsinaappittsburghofficialvirginiav" + + "irtual-userveexchangevirtualcloudvirtualservervirtualuserveftpiw" + + "atevirtueeldomein-vigorlicevirtuelvisakegawaviterboknowsitallviv" + + "olkenkundenvixn--3hcrj9clanbibaidarmeniavlaanderenvladikavkazimi" + + "erz-dolnyvladimirvlogintoyonezawavminiservervologdanskommunevolv" + + "olkswagentsowavolyngdalvoorloperauniterois-gonevossevangenvotevo" + + "tingvotoyonowiwatsukiyonoshiroomgwloclawekomorotsukagawawmcloudw" + + "mflabspeedpartnersoownextdirectrevisohughesorreisahayakawakamiic" + + "hikawamisatottoris-bytomaritimekeepingworldworse-thandawowitdkom" + + "onow-dnshisognewpdevcloudwpenginepoweredwritesthisblogwroclawith" + + "googleapiszwtcircleverappsphinxn--3e0b707ewtfauskedsmokorsetagay" + + "aseralingenoamishirasatogokasells-for-lessavannahgawuozuwzmiuwaj" + + "imaxn--45q11clic20001wwwfarsundyndns-webhareidsbergentingripexn-" + + "-4gbriminingxn--4it168dxn--4it797kongsbergxn--4pvxs4allxn--54b7f" + + "ta0cclicketcloudcontrolapplicationcloud66xn--55qw42gxn--55qx5dxn" + + "--5js045dxn--5rtp49clinichofunatoriginstitutemasekasaokamiminers" + + "andvikcoromantovalle-d-aostatic-accessanfranciscofreakunemuroran" + + "gecloudyclusterxn--5rtq34kongsvingerxn--5su34j936bgsgxn--5tzm5gx" + + "n--6btw5axn--6frz82gxn--6orx2rxn--6qq986b3xlxn--7t0a264cliniquen" + + "oharaxn--80adxhkspjelkavikomatsushimarylhurstjordalshalsenxn--80" + + "ao21axn--80aqecdr1axn--80asehdbarefootballooningjerdrumckinseyol" + + "asitebinordre-landiscoveryggeebizenakanojohanamakinoharaustinnau" + + "mburggfarmerseineastasiamuneues3-ap-southeast-2ix4432-balsan-sue" + + "dtirolkuszczytnord-aurdalipayboltatarantours3-ap-northeast-2xn--" + + "80aswgxn--80augustowithyoutuberspacekitagatargetmyiphosteurxn--8" + + "ltr62koninjambylxn--8pvr4uxn--8y0a063axn--90a3academiamicable-mo" + + "democraciaxn--90aeroportalabamagasakishimabaraogakibichuoxn--90a" + + "ishobarakawagoexn--90azhytomyravendbargainstantcloudfunctionsncf" + + "dishakotanavigationavoirmcpehimejibigawaustraliamusementdllpages" + + "3-ca-central-1xn--9dbhblg6dietritonxn--9dbq2axn--9et52uxn--9krt0" + + "0axn--andy-iraxn--aroport-byaotsurreyxn--asky-iraxn--aurskog-hla" + + "nd-jnbarreauctionfabricagliaricoharuhrxn--avery-yuasakuhokkaidop" + + "aaskvollxn--b-5gaxn--b4w605ferdxn--balsan-sdtirol-nsbspreadbetti" + + "ngxn--bck1b9a5dre4clintonoshoesantabarbaraxn--bdddj-mrabdxn--bea" + + "ralvhki-y4axn--berlevg-jxaxn--bhcavuotna-s4axn--bhccavuotna-k7ax" + + "n--bidr-5nachikatsuuraxn--bievt-0qa2xn--bjarky-fyasakaiminatoyoo" + + "kaneyamazoexn--bjddar-ptarnobrzegyptianxn--blt-elabourxn--bmlo-g" + + "raingerxn--bod-2natalxn--bozen-sdtirol-2obanazawaxn--brnny-wuaca" + + 
"demy-firewall-gatewayxn--brnnysund-m8accident-investigation-apti" + + "bleadpagesquare7xn--brum-voagatroandinosaurepaircraftingvollomba" + + "rdiademonmouthagebostadxn--btsfjord-9zaxn--bulsan-sdtirol-nsbarr" + + "el-of-knowledgeappleborkaracoldwarszawaustrheimatunduhrennesoyok" + + "osukanraukraanghkeymachineustargardds3-eu-central-1xn--c1avgxn--" + + "c2br7gxn--c3s14minnesotaketakazakis-a-therapistoiaxn--cck2b3barr" + + "ell-of-knowledgehirnufcfanavuotnapleskns3-us-gov-west-1xn--cckwc" + + "xetdxn--cesena-forl-mcbremangerxn--cesenaforl-i8axn--cg4bkis-int" + + "o-animeinforumzxn--ciqpnxn--clchc0ea0b2g2a9gcdxn--comunicaes-v6a" + + "2oxn--correios-e-telecomunicaes-ghc29axn--czr694barsycenterprise" + + "sakikuchikuseikarugamvik-serverrankoshigayachiyodaejeonbukcoalph" + + "a-myqnapcloud-fr1xn--czrs0trogstadxn--czru2dxn--czrw28barsyonlin" + + "ewhampshirealtydalvdalaskanittedallasalleangaviikaascolipicenodu" + + "members3-us-west-1xn--d1acj3bashkiriauthordalandgcapebretonamicr" + + "osoftbank12xn--d1alfaromeoxn--d1atromsakatamayufuelblagrarchaeol" + + "ogyeongbuk0xn--d5qv7z876clothingdustdataitogitsuldalvivanovoldax" + + "n--davvenjrga-y4axn--djrs72d6uyxn--djty4konskowolayangrouphotogr" + + "aphysioxn--dnna-grajewolterskluwerxn--drbak-wuaxn--dyry-iraxn--e" + + "1a4cn-northwest-1xn--eckvdtc9dxn--efvn9spydebergxn--efvy88haibar" + + "akitahatakanezawaxn--ehqz56nxn--elqq16hair-surveillancexn--eveni" + + "-0qa01gaxn--f6qx53axn--fct429konsulatrobeepilepsykkylvenetodayxn" + + "--fhbeiarnxn--finny-yuaxn--fiq228c5hsrlxn--fiq64basicservercelli" + + "guriautomotiveconomiasakuchinotsuchiurakawakuyabukikonaikawachin" + + "aganoharamcoachampionshiphoptobamadridnbloggerevistaples3-eu-wes" + + "t-1xn--fiqs8srvarggatrentinsuedtirolxn--fiqz9storegontrailroadxn" + + "--fjord-lraxn--fjq720axn--fl-ziaxn--flor-jraxn--flw351exn--forl-" + + "cesena-fcbsstorfjordxn--forlcesena-c8axn--fpcrj9c3dxn--frde-gran" + + "drapidstorjcloud-ver-jpchungnamdalseidfjordyndns-picsannohelplfi" + + "nancialuxuryxn--frna-woaraisaijosoyrorostpetersburgxn--frya-hrax" + + "n--fzc2c9e2cngriwataraidyndns-wikiraxn--fzys8d69uvgmailxn--g2xx4" + + "8cnpyatigorskodjeepsondriodejaneirockartuzyxn--gckr3f0fbsbxn--1c" + + "k2e1bar0emmafann-arboretumbriamallamaceiobbcg12038xn--gecrj9cnsa" + + "ntacruzsewhalingroks-thisayamanobeokalmykiaxn--ggaviika-8ya47hak" + + "atanorthwesternmutualxn--gildeskl-g0axn--givuotna-8yasugitpagefr" + + "ontappixolinoxn--gjvik-wuaxn--gk3at1exn--gls-elacaixaxn--gmq050i" + + "s-into-carshirahamatonbetsurnadalxn--gmqw5axn--h-2failxn--h1aegh" + + "akodatexn--h2breg3evenestreams1xn--h2brj9c8cntoyotaparsantafedje" + + "ffersonxn--h3cuzk1discountysnestudioxn--hbmer-xqaxn--hcesuolo-7y" + + "a35basilicataniautoscanadaeguambulancechirealmpmnaval-d-aosta-va" + + "lleyokoteastcoastaldefenceastus2xn--hery-iraxn--hgebostad-g3axn-" + + "-hkkinen-5waxn--hmmrfeasta-s4accident-prevention-k3studynamische" + + "s-dnsopotrentinsued-tirolxn--hnefoss-q1axn--hobl-iraxn--holtlen-" + + "hxaxn--hpmir-xqaxn--hxt814exn--hyanger-q1axn--hylandet-54axn--i1" + + "b6b1a6a2exn--imr513nxn--indery-fyasuokannamihoboleslawiecolognew" + + "spaperxn--io0a7is-into-cartoonshirakokaminokawanishiaizubangexn-" + + "-j1aefbx-ostrowiechoseiroumuenchenissedaluroyxn--j1amhakonexn--j" + + "6w193gxn--jlq480n2rgxn--jlq61u9w7basketballfinanzgorzeleccollect" + + "ionayorovigovtaxihuanfshostyhostingjerstadotsuruokakegawaveroyke" + + "ngerdalces3-eu-west-2xn--jlster-byatominamidaitomanchesterxn--jr" + + "peland-54axn--jvr189mintereisenxn--k7yn95exn--karmy-yuaxn--kbrq7" + + 
"oxn--kcrx77d1x4axn--kfjord-iuaxn--klbu-woaxn--klt787dxn--kltp7dx" + + "n--kltx9axn--klty5xn--3oq18vl8pn36axn--koluokta-7ya57hakubahcavu" + + "otnagaivuotnagaokakyotambabyenglandxn--kprw13dxn--kpry57dxn--kpu" + + "t3is-into-gamessinazawaxn--krager-gyatsukanoyaltakasugais-leetre" + + "ntino-aadigexn--kranghke-b0axn--krdsherad-m8axn--krehamn-dxaxn--" + + "krjohka-hwab49jdevcloudjiffylkesbiblackbaudcdn-edgestackhero-net" + + "workinggroupaashorokanaiexn--ksnes-uuaxn--kvfjord-nxaxn--kvitsy-" + + "fyatsushiroxn--kvnangen-k0axn--l-1fairwindstuff-4-salexn--l1acce" + + "ntureklamborghinikolaeventstufftoread-booksnesor-odalxn--laheadj" + + "u-7yawaraxn--langevg-jxaxn--lcvr32dxn--ldingen-q1axn--leagaviika" + + "-52batochiokinoshimaintenanceobninskaragandavocatanzarowbq-aursk" + + "og-holandingdyniajudaicadaquest-mon-blogueurovision-riopretobish" + + "imagazinekobayashikshacknetnedalaheadjudygarlanddnslivelanddnss3" + + "-ap-southeast-1xn--lesund-huaxn--lgbbat1ad8jdfastlylbanzaiclouda" + + "ppscbgivingjemnes3-fips-us-gov-west-1xn--lgrd-poacctromsojamison" + + "xn--lhppi-xqaxn--linds-pramericanartrusteexn--lns-qlaquilanstutt" + + "gartrentoyonakagyokutoyakolobrzegersundxn--loabt-0qaxn--lrdal-sr" + + "axn--lrenskog-54axn--lt-liacolonialwilliamsburgrondarxn--lten-gr" + + "anexn--lury-iraxn--m3ch0j3axn--mely-iraxn--merker-kuaxn--mgb2dde" + + "susakis-certifiedunetlifyis-a-musicianxn--mgb9awbfbxostrowwlkpmg" + + "ruexn--mgba3a3ejtrvaroyxn--mgba3a4f16axn--mgba3a4fra1-dexn--mgba" + + "7c0bbn0axn--mgbaakc7dvfedorainfracloudfrontdoorxn--mgbaam7a8haku" + + "is-a-greenxn--mgbab2bdxn--mgbah1a3hjkrdxn--mgbai9a5eva00batsfjor" + + "diskussionsbereichattanooganordeste-idcasertairanzanhktcmemergen" + + "cyahikobeardubaiduckdns3-us-west-2xn--mgbai9azgqp6jejuniperxn--m" + + "gbayh7gpaleoxn--mgbbh1a71exn--mgbc0a9azcgxn--mgbca7dzdoxn--mgber" + + "p4a5d4a87gxn--mgberp4a5d4arxn--mgbgu82axn--mgbi4ecexposedxn--mgb" + + "pl2fhappouxn--mgbqly7c0a67fbcoloradoplateaudiopsysantamariakexn-" + + "-mgbqly7cvafr-1xn--mgbt3dhdxn--mgbtf8flatangerxn--mgbtx2bauhausp" + + "osts-and-telecommunicationswedeniwaizumiotsukumiyamazonawsmpplan" + + "etariumemorialillyombolzano-altoadigeometre-experts-comptables3-" + + "website-ap-northeast-1xn--mgbx4cd0abbvieeexn--mix082fedorapeople" + + "gallodingenxn--mix891fedoraprojectozsdeportevadsobetsulikes-pied" + + "monticellocalzonexn--mjndalen-64axn--mk0axin-dslgbtrycloudflarep" + + "bodynamic-dnsortlandxn--mk1bu44columbusheyxn--mkru45is-lostre-to" + + "teneis-a-nascarfanxn--mlatvuopmi-s4axn--mli-tlarvikonyvelolipopu" + + "sinteractivegashisuifuettertdasnetzxn--mlselv-iuaxn--moreke-juax" + + "n--mori-qsakuragawaxn--mosjen-eyawatahamaxn--mot-tlavagiskexn--m" + + "re-og-romsdal-qqbuseranishiaritakurashikis-not-certifiedxn--msy-" + + "ula0hakusanagochijiwadellogliastradingxn--mtta-vrjjat-k7aflaksta" + + "daokagakicks-assnasaarlandxn--muost-0qaxn--mxtq1misakis-an-accou" + + "ntantshiojirishirifujiedaxn--ngbc5azdxn--ngbe9e0axn--ngbrxn--3px" + + "u8komvuxn--30rr7yxn--nit225kooris-a-personaltrainerxn--nmesjevue" + + "mie-tcbalsan-sudtirollagdenesnaaseinet-freaksusonoxn--nnx388axn-" + + "-nodessakurais-savedxn--nqv7fs00emaxn--nry-yla5gxn--ntso0iqx3axn" + + "--ntsq17gxn--nttery-byaeservehalflifeinsurancexn--nvuotna-hwaxn-" + + "-nyqy26axn--o1achernihivgubsuzakananiikappuboliviajessheimpertri" + + "xcdn77-secureggiocalabriaxn--o3cw4haldenxn--o3cyx2axn--od0algxn-" + + "-od0aq3beneventoeidskoguchikuzenvironmentalconservationionjukudo" + + "yamaizuruovat-band-campaniavoues3-eu-west-3utilities-1kappchizip" + + 
"6116-b-datacentermezgorabogadobeaemcloud-dealerimo-i-rana4u2-loc" + + "alhostrodawarabruzzoologicalvinklein-addrammenuorochestereport3l" + + "3p0rtashkentatamotors3-ap-northeast-1337xn--ogbpf8flekkefjordxn-" + + "-oppegrd-ixaxn--ostery-fyaxn--osyro-wuaxn--otu796dxn--p1acfeiraq" + + "uarelleaseeklogesaveincloudxn--p1ais-slickazteleportlligatrentin" + + "o-alto-adigexn--pgbs0dhlxn--porsgu-sta26fermochizukirkenesaves-t" + + "he-whalessandria-trani-barletta-andriatranibarlettaandriaxn--pss" + + "u33lxn--pssy2uxn--q9jyb4communewyorkshirebungoonordkappartintuit" + + "oyotomiyazakinuyamashinatsukigatakasakitauraxn--qcka1pmcdirxn--q" + + "qqt11misasaguris-an-actorxn--qxa6axn--qxamsterdamnserverbaniaxn-" + + "-rady-iraxn--rdal-poaxn--rde-ulavangenxn--rdy-0nabaris-uberleetr" + + "entino-altoadigexn--rennesy-v1axn--rhkkervju-01aferraraxn--rholt" + + "-mragowoodsidevelopmentrysiljanxn--rhqv96gxn--rht27zxn--rht3dxn-" + + "-rht61exn--risa-5nativeamericanantiquesuzukanazawaxn--risr-iraxn" + + "--rland-uuaxn--rlingen-mxaxn--rmskog-byaxn--rny31halsaitamatsuku" + + "ris-a-gurusrcfastly-terrariuminamiechizenxn--rovu88bentleyomitan" + + "observerxn--rros-granvindafjordxn--rskog-uuaxn--rst-0naturalhist" + + "orymuseumcenterxn--rsta-franamizuholdingsmall-webhostingxn--rvc1" + + "e0am3exn--ryken-vuaxn--ryrvik-byaxn--s-1faithammarfeastafricarbo" + + "nia-iglesias-carboniaiglesiascarboniaxn--s9brj9community-prochow" + + "icexn--sandnessjen-ogbeppublishproxyzjampagexlimanowarudaxarnetf" + + "lixilovecollegefantasyleaguernseyokozeatonsbergivestbytemarkanza" + + "kiwielunnerhcloudiscourses3-external-1xn--sandy-yuaxn--sdtirol-n" + + "2axn--seral-lraxn--ses554gxn--sgne-graphoxn--42c2d9axn--skierv-u" + + "tazasuzukis-foundationxn--skjervy-v1axn--skjk-soaxn--sknit-yqaxn" + + "--sknland-fxaxn--slat-5naturalsciencesnaturellesvalbardunloppaci" + + "ficivilizationxn--slt-elabcn-north-1xn--smla-hraxn--smna-gratang" + + "entlentapisa-geekopervikfh-muensterxn--snase-nraxn--sndre-land-0" + + "cbeskidyn-ip24xn--snes-poaxn--snsa-roaxn--sr-aurdal-l8axn--sr-fr" + + "on-q1axn--sr-odal-q1axn--sr-varanger-ggbestbuyshouses3-website-a" + + "p-southeast-1xn--srfold-byaxn--srreisa-q1axn--srum-gratis-a-bull" + + "s-fanxn--stfold-9xaxn--stjrdal-s1axn--stjrdalshalsen-sqbetainabo" + + "xfusejnyanagawaltervistaikikugawashingtondclk3xn--stre-toten-zcb" + + "hzcasinorddalimitedisrechtranaharimalselvendrellimoliseminempres" + + "ashibetsukuibmdivtasvuodnakaiwamizawaweddingjesdalivornoceanogra" + + "phiquemrxn--t60b56axn--tckwebspacexn--tiq49xqyjelasticbeanstalka" + + "zunotteroyxn--tjme-hraxn--tn0agrinetbankoryokamikawanehonbetsuru" + + "taharaxn--tnsberg-q1axn--tor131oxn--trany-yuaxn--trentin-sd-tiro" + + "l-rzbieidsvollombardynaliasnesoddenmarkhangelskjakdnepropetrovsk" + + "iervaapsteiermarkarasjohkamikoaniihamatamakawajimarriottcp4xn--t" + + "rentin-sdtirol-7vbrplsbxn--45br5cylxn--trentino-sd-tirol-c3biela" + + "washtenawdev-myqnapcloudcontrolledekagaminogifts3-website-ap-sou" + + "theast-2xn--trentino-sdtirol-szbiellaakesvuemielecceu-1xn--trent" + + "inosd-tirol-rzbieszczadygeyachts3-website-eu-west-1xn--trentinos" + + "dtirol-7vbievathletajimabaridagawakkanaibetsubamericanfamilydscl" + + "ouderacingjovikarasjokarasuyamarshallstatebankarateu-2xn--trenti" + + "nsd-tirol-6vbifukagawassamukawatarikuzentakatainaioirasebastopol" + + "ogyeongnamegawafaicloudineat-urlomzaporizhzheguriitatebayashijon" + + "awateu-3xn--trentinsdtirol-nsbigv-infolldalondonetskaratsuginami" + + "katagamilanoticias3-website-sa-east-1xn--trgstad-r1axn--trna-woa" + + 
"xn--troms-zuaxn--tysvr-vraxn--uc0atvestfoldxn--uc0ay4axn--uist22" + + "hamurakamigoris-a-hard-workershawaiijimarcheapigeelvinckaufenxn-" + + "-uisz3gxn--unjrga-rtarumizusawaxn--unup4yxn--uuwu58axn--vads-jra" + + "xn--valle-aoste-ebbtunesorumincomcastresindevicenzaporizhzhiaxn-" + + "-valle-d-aoste-ehbodoes-it1-eurxn--valleaoste-e7axn--valledaoste" + + "-ebbvacationsvcivilwarmiastagets-itmparochernigovernmentoyosatoy" + + "okawaxn--vard-jraxn--vegrshei-c0axn--vermgensberater-ctbihorolog" + + "yonagoyaxn--vermgensberatung-pwblogoipizzaxn--vestvgy-ixa6oxn--v" + + "g-yiabkhaziaxn--vgan-qoaxn--vgsy-qoa0jelenia-goraxn--vgu402comob" + + "araxn--vhquvestnesouthcarolinarvikomakiyosatokamachintaifun-dnsa" + + "liashishikuis-a-patsfanxn--vler-qoaxn--vre-eiker-k8axn--vrggt-xq" + + "adxn--vry-yla5gxn--vuq861bikedaemoneyonagunicloudivttasvuotnakam" + + "agayahooguyoriikarelianceu-4xn--w4r85el8fhu5dnraxn--w4rs40lxn--w" + + "cvs22dxn--wgbh1comparemarkerryhotelsantoandreamhostersanukinvest" + + "mentsaobernardownloadyndns-workshopitsitexasaogoncasacamdvrcampi" + + "nagrandebuilderschlesischesaotomelbournexn--wgbl6axn--xhq521bilb" + + "aokinawashirosatochigiessensiositechnologyoshiokanumazuryukiiyam" + + "anouchikuhokuryugasakitashiobaraxn--xkc2al3hye2axn--xkc2dl3a5ee0" + + "handsonyoursidelmenhorstalbanshellaspeziaxn--y9a3aquariumisawaxn" + + "--yer-znaturbruksgymnxn--yfro4i67oxn--ygarden-p1axn--ygbi2ammxn-" + + "-45brj9cldmailuzernxn--ystre-slidre-ujbillustrationredumbrellahp" + + "piacenzachpomorskienhlfanhs3-website-us-east-1xn--zbx025dxn--zf0" + + "ao64axn--zf0avxlxn--zfr164biocelotenkawaxnbayxz" // nodes is the list of nodes. Each node is represented as a uint32, which // encodes the node's children, wildcard bit and node type (as an index into @@ -528,1808 +535,1812 @@ const text = "9guacuiababia-goracleaningroks-theatree12hpalermomahachijolstere" // [15 bits] text index // [ 6 bits] text length var nodes = [...]uint32{ - 0x32f643, - 0x3b5c84, - 0x2f7846, - 0x2ed303, - 0x2ed306, - 0x391ec6, - 0x3ba683, - 0x242cc4, - 0x2089c7, - 0x2f7488, + 0x20bc43, + 0x25d9c4, + 0x2f8cc6, + 0x217243, + 0x217246, + 0x38e886, + 0x3bb603, + 0x2392c4, + 0x3a15c7, + 0x2f8908, 0x1a000c2, - 0x1f3c187, - 0x37b0c9, - 0x39a04a, - 0x39a04b, - 0x231983, - 0x234b85, - 0x2202642, - 0x280004, - 0x2f79c3, - 0x202645, - 0x2608c02, - 0x365e83, - 0x2a15d84, - 0x3b5585, - 0x2e12282, - 0x27520e, - 0x251a43, - 0x3adec6, - 0x3207d42, - 0x306e07, - 0x237306, - 0x3601f82, - 0x26d143, - 0x334e46, - 0x360f48, - 0x28e806, - 0x276804, + 0x1f3cf47, + 0x376f09, + 0x397eca, + 0x397ecb, + 0x23a2c3, + 0x23cf05, + 0x22070c2, + 0x2f5304, + 0x2f8e43, + 0x30eb85, + 0x260ad42, + 0x360f03, + 0x2a58bc4, + 0x30f345, + 0x2e13602, + 0x21638e, + 0x25c3c3, + 0x3b3dc6, + 0x3202302, + 0x3096c7, + 0x23fa86, + 0x3606a82, + 0x28e183, + 0x235e06, + 0x2f4148, + 0x295bc6, + 0x3c7c04, 0x3a00ac2, - 0x34cd89, - 0x222087, - 0x3b4c86, - 0x370f49, - 0x3c8608, - 0x354f84, - 0x25b9c6, - 0x3cdd86, - 0x3e029c2, - 0x2a7f06, - 0x24394f, - 0x27f04e, - 0x221684, - 0x2d4205, - 0x32f545, - 0x215589, - 0x23d909, - 0x335647, - 0x355246, - 0x203583, - 0x42272c2, - 0x22ce03, - 0x2937ca, - 0x4601ac3, - 0x3e1a45, - 0x239202, - 0x392449, - 0x4e03502, - 0x209784, - 0x2f4406, - 0x28fac5, - 0x3732c4, - 0x56263c4, - 0x233f03, - 0x233f04, - 0x5a02e42, - 0x385d04, - 0x5e83a84, - 0x25d6ca, + 0x34b449, + 0x220787, + 0x32e5c6, + 0x36ba09, + 0x3ce888, + 0x210944, + 0x2acb06, + 0x2076c6, + 0x3e02002, + 0x38cc46, + 0x24d68f, + 0x3cdb8e, + 0x22b1c4, + 0x234c85, + 0x330d45, + 0x3aaa09, + 0x247e89, + 
0x236607, + 0x2584c6, + 0x267083, + 0x422d0c2, + 0x22d543, + 0x29b5ca, + 0x4609983, + 0x3403c5, + 0x30a8c2, + 0x3a4f89, + 0x4e03b42, + 0x207a04, + 0x354186, + 0x243885, + 0x36ebc4, + 0x5626e04, + 0x203b43, + 0x23c4c4, + 0x5a030c2, + 0x25b344, + 0x5f2d504, + 0x316d0a, 0x6200882, - 0x229547, - 0x27e508, - 0x7a07282, - 0x334a47, - 0x2ce984, - 0x2ce987, - 0x3dbac5, - 0x390e07, - 0x34b706, - 0x2a1184, - 0x36a285, - 0x257e87, - 0x8e07cc2, - 0x2a8083, - 0x9210642, - 0x3b3f43, - 0x96074c2, - 0x2173c5, - 0x9a00202, - 0x375d04, - 0x2ef285, - 0x2215c7, - 0x25d04e, - 0x2ba484, - 0x29a884, - 0x20ebc3, - 0x35c549, - 0x2c17cb, - 0x2c75c8, - 0x32cc48, - 0x3313c8, - 0x3e1f48, - 0x370d8a, - 0x390d07, - 0x356606, - 0x9e3de82, - 0x26f0c3, - 0x3d2103, - 0x3d3c84, - 0x26f103, - 0x361e43, - 0x1737f82, - 0xa206c02, - 0x284a05, - 0x2bc146, - 0x234944, - 0x3aee07, - 0x26bdc6, - 0x2cd644, - 0x3bdc87, - 0x20d483, - 0xa6d7f02, - 0xab0bf02, - 0xae7b6c2, - 0x30bcc6, - 0xb200282, - 0x2a4d45, - 0x3394c3, - 0x3d5bc4, - 0x2f9284, - 0x2f9285, - 0x3dff03, - 0xb64ac43, - 0xba05102, - 0x2093c5, - 0x2093cb, - 0x2b2a0b, - 0x204cc4, - 0x209849, - 0x20ae84, - 0xbe0b742, - 0x20c303, - 0x20e1c3, - 0xc207f42, - 0x2f2aca, - 0xc608a02, - 0x280285, - 0x2e858a, - 0x242644, - 0x210143, - 0x210a04, - 0x211943, - 0x211944, - 0x211947, - 0x212685, - 0x213086, - 0x213386, - 0x214683, - 0x218248, - 0x217143, - 0xca0cfc2, - 0x266308, - 0x28ea8b, - 0x2208c8, - 0x221106, - 0x222887, - 0x225048, - 0xda0aac2, - 0xde1c942, - 0x272d48, - 0x20f1c7, - 0x20f705, - 0x310f88, - 0xe302e48, - 0x2b0ec3, - 0x22bec4, - 0x391f42, - 0xe62c0c2, - 0xea06cc2, - 0xf22c442, - 0x22c443, - 0xf60cf02, - 0x316343, - 0x332284, - 0x214803, - 0x354f44, - 0x32430b, - 0x20cf03, - 0x2f2086, - 0x25d544, - 0x2c888e, - 0x377205, - 0x268a88, - 0x3adfc7, - 0x3adfca, - 0x231503, - 0x2355c7, - 0x2c1985, - 0x231504, - 0x253a06, - 0x253a07, - 0x31dd84, - 0xfb109c4, - 0x25d384, - 0x25d386, - 0x252684, - 0x3c2f86, - 0x20f4c3, - 0x20f4c8, - 0x210448, - 0x29a843, - 0x2f2a83, - 0x343c04, - 0x35c0c3, - 0x1020cdc2, - 0x106bd282, - 0x205083, - 0x243fc6, - 0x25bac3, - 0x274784, - 0x10a30c82, - 0x25ce43, - 0x316a83, - 0x214dc2, - 0x10e00d42, - 0x2d3286, - 0x235a07, - 0x229bc7, - 0x3c0d85, - 0x21cc84, - 0x2a0dc5, - 0x30f247, - 0x2e5a49, - 0x2ee886, - 0x3032c6, - 0x11602282, - 0x307a08, - 0x31a706, - 0x2b1bc5, - 0x30c3c7, - 0x30dcc4, - 0x30dcc5, - 0x11a02284, - 0x202288, - 0x11e09482, - 0x12200482, - 0x275946, + 0x3cd347, + 0x27b5c8, + 0x7a08502, + 0x336287, + 0x2d36c4, + 0x2d36c7, + 0x38aa45, + 0x38bf07, + 0x34a906, + 0x29ac84, + 0x3633c5, + 0x282507, + 0x920c142, + 0x38cdc3, + 0x960b4c2, + 0x3b5e03, + 0x9a08742, + 0x2691c5, + 0x9e00202, + 0x371604, + 0x387345, + 0x22b107, + 0x2e954e, + 0x206984, + 0x283b04, + 0x2079c3, + 0x30d489, + 0x2c4e4b, + 0x2e1248, + 0x32b788, + 0x3328c8, + 0x20a888, + 0xa36b84a, + 0x38be07, + 0x2f7086, + 0xa617282, + 0x35ca43, + 0x3d6443, + 0x3d8084, + 0x35ca83, + 0x3bb643, + 0x1738b82, + 0xaa04702, + 0x28a385, + 0x261e86, + 0x252084, + 0x3b0cc7, + 0x25b186, + 0x2d4704, + 0x3be9c7, + 0x204703, + 0xb2dc982, + 0xb728c42, + 0xba13982, + 0x230646, + 0xbe00282, + 0x26b385, + 0x33a0c3, + 0x3de644, + 0x2fd584, + 0x2fd585, + 0x3e9683, + 0xc253c43, + 0xc606342, + 0x20e9c5, + 0x20e9cb, + 0x223c8b, + 0x20e804, + 0x20ee49, + 0x210404, + 0xca10d82, + 0x211a83, + 0x2121c3, + 0xce02502, + 0x23020a, + 0xd20bd42, + 0x2f5585, + 0x2ece4a, + 0x246f44, + 0x213f43, + 0x2154c4, + 0x2178c3, + 0x2178c4, + 0x2178c7, + 0x218705, + 0x219546, + 0x21a186, + 0x2172c3, + 0x220f88, + 0x215b03, + 
0xd604242, + 0x2fc548, + 0x295e4b, + 0x229c88, + 0x22ac46, + 0x22b987, + 0x22e908, + 0xee016c2, + 0xf2295c2, + 0x278408, + 0x20b947, + 0x206e85, + 0x3e2208, + 0xf61c008, + 0x26a0c3, + 0x235a44, + 0x38e902, + 0xfa36c42, + 0xfe07f42, + 0x10637242, + 0x237243, + 0x10a04182, + 0x312683, + 0x2135c4, + 0x210903, + 0x210904, + 0x3a264b, + 0x204183, + 0x2f27c6, + 0x284a84, + 0x2ccf8e, + 0x240ec5, + 0x257008, + 0x2716c7, + 0x2716ca, + 0x21b9c3, + 0x25d7c7, + 0x2c5005, + 0x239e44, + 0x25ef06, + 0x25ef07, + 0x3601c4, + 0x10f10344, + 0x3169c4, + 0x3169c6, + 0x25d4c4, + 0x3c2086, + 0x206c43, + 0x206c48, + 0x20b2c8, + 0x2b3843, + 0x2301c3, + 0x344544, + 0x357203, + 0x11604042, + 0x11aea202, + 0x217843, + 0x203c06, + 0x3796c3, + 0x2fd344, + 0x11efd0c2, + 0x343583, + 0x332f83, + 0x21cdc2, + 0x12200d42, + 0x2d7946, + 0x228b07, + 0x27b347, + 0x2c7cc5, + 0x386404, + 0x3d4a45, + 0x3dcc47, + 0x2b5ec9, + 0x2cb106, + 0x2c7bc6, + 0x1320c602, + 0x2b6688, + 0x321346, + 0x327b05, + 0x2f7787, + 0x2fafc4, + 0x2fafc5, + 0x1370e7c4, + 0x30e7c8, + 0x13a08d02, + 0x13e00482, + 0x24c3c6, 0x200488, - 0x337b45, - 0x34d686, - 0x350448, - 0x360a48, - 0x12608cc5, - 0x12a15e84, - 0x215e87, - 0x12e0a902, - 0x13361e82, - 0x14612402, - 0x2f4505, - 0x14e8af45, - 0x269506, - 0x327ec7, - 0x3b26c7, - 0x1522ea43, - 0x32bb87, - 0x3c17c8, - 0x2162ed49, - 0x2753c7, - 0x22f487, - 0x22fe88, - 0x230686, - 0x231006, - 0x231c4c, - 0x23294a, - 0x232d47, - 0x234a4b, - 0x235847, - 0x23584e, - 0x21a36344, - 0x236704, - 0x238a07, - 0x260b47, - 0x23d046, - 0x23d047, - 0x335887, - 0x226dc3, - 0x21e2c982, - 0x23e846, - 0x23e84a, - 0x24004b, - 0x241287, - 0x241d05, - 0x242183, - 0x2423c6, - 0x2423c7, - 0x2fa483, - 0x22200102, - 0x2435ca, - 0x2277c682, - 0x22b49682, - 0x22e40902, - 0x23237402, - 0x246ac5, - 0x247344, - 0x23e0da02, - 0x385d85, - 0x240643, - 0x299645, - 0x201ec4, - 0x21dd04, - 0x2d4e46, - 0x251dc6, - 0x2095c3, - 0x3cce44, - 0x37f243, - 0x24e0f982, - 0x216404, - 0x216406, - 0x222c05, - 0x2482c6, - 0x30c4c8, - 0x265e44, - 0x294208, - 0x232fc5, - 0x259508, - 0x2d0686, - 0x30e0c7, - 0x269c04, - 0x26269c06, - 0x26622383, - 0x3a47c3, - 0x2f7108, - 0x38bc44, - 0x26b32ec7, - 0x2e6946, - 0x2e6949, - 0x369588, - 0x37d748, - 0x389c84, - 0x204583, - 0x240702, - 0x2724e682, - 0x27626282, - 0x205c83, - 0x27a08b02, - 0x2fa404, - 0x2790c6, - 0x21a203, - 0x2c3d47, - 0x3b3a83, - 0x2ba548, - 0x21edc5, - 0x259f83, - 0x2ef205, - 0x2ef344, - 0x30d9c6, - 0x220006, - 0x221506, - 0x2f4c84, - 0x235c03, - 0x27e11702, - 0x282351c5, + 0x325105, + 0x3264c6, + 0x329dc8, + 0x34c608, + 0x14203ec5, + 0x16e2f004, + 0x2b0f87, + 0x1720fe82, + 0x1762e702, + 0x18a16542, + 0x354285, + 0x192904c5, + 0x241c06, + 0x3b6207, + 0x368e07, + 0x19616543, + 0x3d6787, + 0x283a08, + 0x273b4bc9, + 0x216547, + 0x3e03c7, + 0x238308, + 0x238b06, + 0x239946, + 0x23a58c, + 0x23b58a, + 0x23ba87, + 0x23cdcb, + 0x23dd47, + 0x23dd4e, + 0x2763eb84, + 0x23ec84, + 0x240d87, + 0x24be07, + 0x246386, + 0x246387, + 0x3b74c7, + 0x203643, + 0x27a13b02, + 0x248746, + 0x24874a, + 0x248acb, + 0x249f07, + 0x24aac5, + 0x24b283, + 0x24c646, + 0x24c647, + 0x2feac3, + 0x27e00102, + 0x24d30a, + 0x28378742, + 0x2863d842, + 0x28a47402, + 0x28e3fb82, + 0x24f085, + 0x24fdc4, + 0x29a0c542, + 0x25b3c5, + 0x231943, + 0x29d005, + 0x20a784, + 0x21e5c4, + 0x2d9d06, + 0x25cc06, + 0x20ebc3, + 0x3c1a44, + 0x341883, + 0x2aa03242, + 0x2b1504, + 0x3a1a46, + 0x2b1505, + 0x207106, + 0x2f7888, + 0x233d04, + 0x2b0ac8, + 0x2f3f05, + 0x27ce88, + 0x2d57c6, + 0x21c787, + 0x279ec4, + 0x2be79ec6, + 0x2c220a83, + 0x3a6543, + 0x2c05c8, + 
0x334684, + 0x2c615587, + 0x280dc6, + 0x2e9b49, + 0x362488, + 0x32c448, + 0x333004, + 0x20d303, + 0x249182, + 0x2ce57f02, + 0x2d226cc2, + 0x20dd83, + 0x2d615fc2, + 0x2fea44, + 0x285786, + 0x23ca03, + 0x2c72c7, + 0x36ca43, + 0x3e1348, + 0x2253c5, + 0x267d03, + 0x3872c5, + 0x387404, + 0x3bad86, + 0x22a386, + 0x22b046, + 0x2580c4, + 0x23e103, + 0x2da15282, + 0x2de3d545, 0x200843, - 0x28a0da82, - 0x22f203, - 0x3233c5, - 0x28e33fc3, - 0x29633fc9, - 0x29a00942, - 0x2a20fc42, - 0x292845, - 0x2166c6, - 0x2ada86, - 0x2e9f08, - 0x2e9f0b, - 0x346d4b, - 0x3c0f85, - 0x2d8489, + 0x2e603e82, + 0x23a543, + 0x3ca805, + 0x2ea22bc3, + 0x2f23c589, + 0x2f600942, + 0x2fe05342, + 0x2973c5, + 0x21f406, + 0x2b2986, + 0x308cc8, + 0x308ccb, + 0x346d8b, + 0x35b445, + 0x2dcf09, 0x1600b42, - 0x39b4c8, - 0x209b44, - 0x2aa031c2, - 0x34ca03, - 0x2b260d06, - 0x2b600fc2, - 0x3619c8, - 0x2ba293c2, - 0x33d78a, - 0x2bedd983, - 0x2c77b706, - 0x397c88, - 0x242986, - 0x38dc47, - 0x243b47, - 0x3cd90a, - 0x2426c4, - 0x365c04, - 0x37a709, - 0x2cbb1905, - 0x275246, - 0x20f3c3, - 0x24e104, - 0x2ced8384, - 0x3b4447, - 0x2d233647, - 0x25ce84, - 0x3b2b85, - 0x2695c8, - 0x3a4c87, - 0x3a9847, - 0x2d60fa02, - 0x26acc4, - 0x2981c8, - 0x248604, - 0x24bb44, - 0x24bf45, - 0x24c087, - 0x2da81989, - 0x21eb04, - 0x24d4c9, - 0x24d708, - 0x24de84, - 0x24de87, - 0x2de4e483, - 0x24f8c7, - 0x2e201282, - 0x16be142, - 0x250386, - 0x251187, - 0x2515c4, - 0x252dc7, - 0x254047, - 0x254603, - 0x2ba882, - 0x20e782, - 0x32cd43, - 0x3ce884, - 0x3ce88b, - 0x2e72cd48, - 0x259a04, - 0x255d05, - 0x2576c7, - 0x20e785, - 0x31d28a, - 0x259943, - 0x2ea091c2, - 0x21d304, - 0x260909, - 0x264e43, - 0x264f07, - 0x28c949, - 0x2091c8, - 0x26f783, - 0x283187, - 0x283b89, - 0x26a503, - 0x28b544, - 0x28cb89, - 0x290cc6, - 0x2e9d03, - 0x207c82, - 0x23cc03, - 0x2bdf47, - 0x23cc05, - 0x2c15c6, - 0x296d84, - 0x365485, - 0x2844c3, - 0x2148c6, - 0x27eb43, - 0x209a42, - 0x24ac04, - 0x2ee08882, - 0x2f368483, - 0x2f6033c2, - 0x249f83, - 0x20dc44, - 0x303b07, - 0x348546, - 0x27cec2, - 0x2fa04d82, - 0x30c6c4, - 0x30211ac2, - 0x30621c42, - 0x2f0f04, - 0x2f0f05, - 0x363e85, - 0x260286, - 0x30a06d42, - 0x20f8c5, - 0x219a45, - 0x21bb43, - 0x225d86, - 0x227545, - 0x265d82, - 0x360685, - 0x30bc44, - 0x265d83, - 0x265fc3, - 0x30e08f42, - 0x2e4dc7, - 0x24d904, - 0x24d909, - 0x24e004, - 0x28adc3, - 0x2b9808, - 0x3128adc4, - 0x28adc6, - 0x2a49c3, - 0x256543, - 0x266a83, - 0x316fb9c2, - 0x308982, - 0x31a00642, - 0x33b208, - 0x3e0108, - 0x3bef86, - 0x351a05, - 0x303c85, - 0x207d87, - 0x31e46145, - 0x23ca82, - 0x3229cac2, - 0x32600042, - 0x27db48, - 0x31a645, - 0x2feac4, - 0x248205, - 0x2497c7, - 0x388944, - 0x2434c2, - 0x32a0b2c2, - 0x352084, - 0x228b07, - 0x292d07, - 0x390dc4, - 0x3d2c03, - 0x29a784, - 0x29a788, - 0x231346, - 0x25388a, - 0x2f5844, - 0x299e48, - 0x235384, - 0x222986, - 0x29ca84, - 0x2f4806, - 0x24dbc9, - 0x2abc07, - 0x213ec3, - 0x32e5b542, - 0x3a2503, - 0x20b942, - 0x33205742, - 0x34c006, - 0x386d08, - 0x2adc07, - 0x30b109, - 0x2addc9, - 0x2b0405, - 0x2b2d89, - 0x2b3cc5, - 0x2b4b05, - 0x2b5f88, - 0x33611b04, - 0x33a54747, - 0x22f843, - 0x2b6187, - 0x22f846, - 0x2b6987, - 0x2ab845, - 0x22f0c3, - 0x33e32702, - 0x210384, - 0x3422cb02, - 0x3460b5c2, - 0x314d06, - 0x27e485, - 0x2b8ec7, - 0x356e03, - 0x361dc4, - 0x21d783, - 0x355e03, - 0x34a09582, - 0x35208fc2, - 0x391fc4, - 0x32ae03, - 0x305545, - 0x3560f782, - 0x35e02182, - 0x305d46, - 0x2069c4, - 0x30a304, - 0x30a30a, - 0x366005c2, - 0x2160c3, - 0x21528a, - 0x219008, - 0x36a0e704, + 0x2d2908, + 0x20f144, + 0x30602bc2, + 0x33e203, + 
0x30e4bfc6, + 0x31200fc2, + 0x20ae88, + 0x31613242, + 0x37aa4a, + 0x32239383, + 0x32b77546, + 0x318348, + 0x38db06, + 0x389c87, + 0x24d887, + 0x20724a, + 0x246fc4, + 0x360c84, + 0x376889, + 0x32fb3a05, + 0x2163c6, + 0x20bb43, + 0x263284, + 0x33232d44, + 0x32d187, + 0x3365e987, + 0x2edb44, + 0x250145, + 0x241cc8, + 0x250387, + 0x250607, + 0x33a18242, + 0x2a2704, + 0x29e388, + 0x251b04, + 0x254744, + 0x254b05, + 0x254c47, + 0x3468b8c9, + 0x2555c4, + 0x256b09, + 0x256d48, + 0x257604, + 0x257607, + 0x257d03, + 0x259ac7, + 0x34a01282, + 0x16c0502, + 0x25b506, + 0x25bb47, + 0x25c404, + 0x25e347, + 0x25f247, + 0x25fc83, + 0x34e5c082, + 0x239fc2, + 0x260743, + 0x260744, + 0x26074b, + 0x32b888, + 0x2891c4, + 0x2618c5, + 0x262fc7, + 0x2ee845, + 0x3b930a, + 0x266b03, + 0x3520eb02, + 0x21dc84, + 0x26b6c9, + 0x26f443, + 0x26f507, + 0x384989, + 0x211fc8, + 0x213bc3, + 0x286bc7, + 0x288f89, + 0x276a83, + 0x290984, + 0x291d49, + 0x2951c6, + 0x3825c3, + 0x204982, + 0x268803, + 0x2c0307, + 0x38f005, + 0x2c4c46, + 0x219a44, + 0x372285, + 0x289e43, + 0x21abc6, + 0x22e143, + 0x20c342, + 0x253c04, + 0x35634402, + 0x35a34403, + 0x35e04342, + 0x253283, + 0x21a604, + 0x323c87, + 0x21fb46, + 0x290942, + 0x3620e8c2, + 0x32c684, + 0x36a17a42, + 0x36e09ac2, + 0x3caac4, + 0x3caac5, + 0x3b6b85, + 0x37d146, + 0x37207042, + 0x207045, + 0x20f745, + 0x213dc3, + 0x2267c6, + 0x227105, + 0x2305c2, + 0x35ac85, + 0x2305c4, + 0x233c43, + 0x233e83, + 0x3760a302, + 0x2318c7, + 0x257784, + 0x257789, + 0x263184, + 0x290343, + 0x2bd008, + 0x37a90344, + 0x290346, + 0x2b05c3, + 0x262243, + 0x343b43, + 0x37f03e02, + 0x30ad42, + 0x38200642, + 0x33bfc8, + 0x2158c8, + 0x3bfcc6, + 0x385145, + 0x323e05, + 0x202347, + 0x386823c5, + 0x2038c2, + 0x38aa0a82, + 0x38e00042, + 0x2832c8, + 0x2b65c5, + 0x302f84, + 0x250d45, + 0x2514c7, + 0x3b0184, + 0x24d202, + 0x3923b502, + 0x350984, + 0x22fec7, + 0x297b47, + 0x38bec4, + 0x3d7403, + 0x2b3784, + 0x2b3788, + 0x239c86, + 0x25ed8a, + 0x358e44, + 0x29ddc8, + 0x24ffc4, + 0x22ba86, + 0x2a0a44, + 0x354586, + 0x257a49, + 0x221247, + 0x39d543, + 0x39605102, + 0x386d03, + 0x210f82, + 0x39a027c2, + 0x268f86, + 0x3b2848, + 0x2b2b07, + 0x2331c9, + 0x2b2cc9, + 0x2b5585, + 0x2b6f09, + 0x2b7705, + 0x2b8545, + 0x2b94c8, + 0x39e17a84, + 0x3a25fdc7, + 0x2b96c3, + 0x2b96c7, + 0x3e0786, + 0x2b9c87, + 0x2af945, + 0x2d0843, + 0x3a63b342, + 0x214184, + 0x3aa11402, + 0x3ae1ec82, + 0x31e946, + 0x27b545, + 0x2bbd87, + 0x3c32c3, + 0x20ccc4, + 0x21e103, + 0x2f6883, + 0x3b2042c2, + 0x3ba08e82, + 0x38e984, + 0x25c043, + 0x308985, + 0x3be05502, + 0x3c602102, + 0x222f86, + 0x2e9484, + 0x2f0284, + 0x2f028a, + 0x3ce005c2, + 0x20e103, + 0x23498a, + 0x26a7c8, + 0x3d2b1b84, 0x2005c3, - 0x36e0a2c3, - 0x26a749, - 0x247109, - 0x2c3e46, - 0x372191c3, - 0x2191c5, - 0x21e7cd, - 0x22db06, - 0x2e61cb, - 0x37607542, - 0x358448, - 0x3b20c202, - 0x3b603082, - 0x39e285, - 0x3ba04b82, - 0x2af7c7, - 0x205603, - 0x227708, - 0x3be022c2, - 0x25ef84, - 0x21fc83, - 0x354a05, - 0x240746, - 0x227104, - 0x2f2a43, - 0x384583, - 0x3c206142, - 0x3c0f04, - 0x2bab45, - 0x2bdb47, - 0x281403, - 0x2be4c3, - 0x1616fc2, - 0x2be783, - 0x2beb83, - 0x3c600e02, - 0x33f584, - 0x235e06, - 0x2e6503, - 0x2bf943, - 0x3ca4b202, - 0x24b208, - 0x2c0904, - 0x33f306, - 0x253e87, - 0x29a946, - 0x38bbc4, - 0x4ae03102, - 0x22f70b, - 0x30180e, - 0x217a8f, - 0x2be183, - 0x4b65a642, - 0x1641882, - 0x4ba03802, - 0x2563c3, - 0x20ee83, - 0x21b306, - 0x34e0c6, - 0x395dc7, - 0x3d2484, - 0x4be16802, - 0x4c21f2c2, - 0x2e2845, - 0x33dec7, - 0x2c2506, - 0x4c669782, - 0x3626c4, - 0x2c7a83, - 
0x4ca06902, - 0x4cf78103, - 0x2c9284, - 0x2cde89, - 0x4d2d5182, - 0x4d60a342, - 0x248985, - 0x4dad5682, - 0x4de01582, - 0x364e47, - 0x37b34b, - 0x243905, - 0x258509, - 0x270906, - 0x4e201584, - 0x206d89, - 0x2d6a07, - 0x22a147, - 0x22c743, - 0x2f0d86, - 0x352f87, - 0x21df43, - 0x2a87c6, - 0x4ea29a82, - 0x4ee34242, - 0x2061c3, - 0x392605, - 0x303147, - 0x236d06, - 0x23cb85, - 0x24d884, - 0x2aad45, - 0x393dc4, - 0x4f201482, - 0x2e9184, - 0x247004, - 0x24700d, - 0x2ee249, - 0x22ca48, - 0x248c04, - 0x347fc5, - 0x204407, - 0x206504, - 0x26be87, - 0x267a45, - 0x4f60a284, - 0x2c6045, - 0x201484, - 0x253306, - 0x394fc5, - 0x4faa4c82, - 0x2758c3, - 0x357643, - 0x35d804, - 0x35d805, - 0x39d506, - 0x23ccc5, - 0x368e84, - 0x364343, - 0x4fe17e86, - 0x21a8c5, - 0x21e2c5, - 0x327dc4, - 0x2f58c3, - 0x2f58cc, - 0x502bdc42, - 0x50600e82, - 0x50a02702, - 0x21e1c3, - 0x21e1c4, - 0x50e0a682, - 0x3b9e88, - 0x2c1685, - 0x2d5ec4, - 0x230e86, - 0x51204202, - 0x5162d582, - 0x51a00c42, - 0x296545, - 0x2f4b46, - 0x265684, - 0x335386, - 0x229306, - 0x25bfc3, - 0x51e9068a, - 0x2815c5, - 0x293783, - 0x209f06, - 0x209f09, - 0x223fc7, - 0x2b7fc8, - 0x3c84c9, - 0x2e5bc8, - 0x22dd86, - 0x20eb83, - 0x52208c82, - 0x32d248, - 0x52606a02, - 0x52a0b982, - 0x215f83, - 0x2ee705, - 0x2a0484, - 0x300689, - 0x3c04c4, - 0x20bc08, - 0x5320b983, - 0x53724784, - 0x216708, - 0x246f47, - 0x53b49242, - 0x370242, - 0x32f4c5, - 0x385509, - 0x23cb03, - 0x31bb84, - 0x3424c4, - 0x204483, - 0x28698a, - 0x53f93b42, - 0x542101c2, - 0x2d7e83, - 0x396083, - 0x162dfc2, - 0x26e8c3, - 0x54615782, - 0x54a00bc2, - 0x54e17544, - 0x217546, - 0x271a44, - 0x27d983, - 0x289683, - 0x55200bc3, - 0x2403c6, - 0x3d5d85, - 0x2dbe07, - 0x2dbd46, - 0x2dcd88, - 0x2dcf86, - 0x202a04, - 0x2a21cb, - 0x2dfa03, - 0x2dfa05, - 0x20e982, - 0x365142, - 0x55646b42, - 0x55a0a942, - 0x216843, - 0x55e720c2, - 0x2720c3, - 0x2e0483, - 0x56603e42, - 0x56ae4806, - 0x258d46, - 0x56ee4942, - 0x5720e202, - 0x57666002, - 0x57a0cac2, - 0x57e0e882, - 0x58203882, - 0x20c543, - 0x3af006, - 0x5861e484, - 0x21620a, - 0x3b0106, - 0x281284, - 0x208143, - 0x59216102, - 0x203182, - 0x241c83, - 0x59617fc3, - 0x3c49c7, - 0x394ec7, - 0x5c245ec7, - 0x37efc7, - 0x228803, - 0x22880a, - 0x237bc4, - 0x31ef04, - 0x31ef0a, - 0x22eb85, - 0x5c60e742, - 0x250343, - 0x5ca00602, - 0x24dfc3, - 0x3a24c3, - 0x5d200582, - 0x3c1744, - 0x207f84, - 0x3dcc45, - 0x32e9c5, - 0x2f6786, - 0x30a546, - 0x5d63bec2, - 0x5da02542, - 0x301dc5, - 0x258a52, - 0x363486, - 0x291043, - 0x31c146, - 0x2b6585, - 0x1605cc2, - 0x65e0fec2, - 0x377b43, - 0x20fec3, - 0x39f483, - 0x66201102, - 0x20f443, - 0x666035c2, - 0x207583, - 0x3dcf88, - 0x269543, - 0x2b0286, - 0x3da087, - 0x34f0c6, - 0x34f0cb, - 0x2811c7, - 0x2f6f04, - 0x66e00c02, - 0x2c1505, - 0x67217f83, - 0x235fc3, - 0x332505, - 0x34a9c3, - 0x67b4a9c6, - 0x3d048a, - 0x2a98c3, - 0x2371c4, + 0x3d687643, + 0x326909, + 0x280609, + 0x2c73c6, + 0x3da43543, + 0x2887cd, + 0x3a8e86, + 0x3e0e8b, + 0x3de087c2, + 0x2ac948, + 0x42221082, + 0x42601e02, + 0x398285, + 0x42a02642, + 0x2b3187, + 0x202983, + 0x2272c8, + 0x42e06002, + 0x3a9984, + 0x22a003, + 0x3532c5, + 0x2491c6, + 0x22cf04, + 0x230183, + 0x44205b42, + 0x35b3c4, + 0x2beb45, + 0x2bff07, + 0x285203, + 0x2c1443, + 0x1619e82, + 0x2c1b03, + 0x2c2103, + 0x44600e02, + 0x239104, + 0x23e306, + 0x288d83, + 0x2c2a83, + 0x44a54202, + 0x254208, + 0x2c3a04, + 0x2052c6, + 0x387d07, + 0x3d4dc6, + 0x2c0544, + 0x52e025c2, + 0x3e064b, + 0x30624e, + 0x2201cf, + 0x3bc5c3, + 0x536687c2, + 0x161ee02, + 0x53a01f42, + 0x2f9843, + 0x20b603, + 0x2732c6, + 0x2cb846, 
+ 0x2bc847, + 0x3b7004, + 0x53e1f542, + 0x542258c2, + 0x302645, + 0x32a647, + 0x2c6106, + 0x5463d782, + 0x382f04, + 0x2cc083, + 0x54a07bc2, + 0x54f73803, + 0x2cd984, + 0x2d2249, + 0x552da042, + 0x55611b82, + 0x2876c5, + 0x55ada802, + 0x56205542, + 0x35fb87, + 0x37718b, + 0x24d645, + 0x264489, + 0x275d46, + 0x56608004, + 0x208009, + 0x2f9cc7, + 0x349887, + 0x205543, + 0x2f1a46, + 0x351887, + 0x24c243, + 0x2a4106, + 0x56e1f002, + 0x57225e82, + 0x217443, + 0x3a5145, + 0x21c307, + 0x23f286, + 0x38ef85, + 0x263104, + 0x2aee85, + 0x390bc4, + 0x5760b402, + 0x2d8d84, + 0x2cbe44, + 0x39c84d, + 0x2cbe49, + 0x237848, + 0x262c84, + 0x38d345, + 0x3c2307, + 0x3c2bc4, + 0x273847, + 0x228f05, + 0x57ab4484, + 0x2c5b45, + 0x26e104, + 0x316546, + 0x3b6005, + 0x57e6b2c2, + 0x225e43, + 0x333e43, + 0x2c8784, + 0x2c8785, + 0x208c86, + 0x235585, + 0x263944, + 0x58392e03, + 0x587d1a86, + 0x219405, + 0x21b385, + 0x3b6104, + 0x2f93c3, + 0x358ecc, + 0x58ac0002, + 0x58e00e82, + 0x59209d42, + 0x21b283, + 0x21b284, + 0x59610442, + 0x308108, + 0x2c4d05, + 0x2dafc4, + 0x359186, + 0x59a205c2, + 0x59e109c2, + 0x5a200c42, + 0x2a3c05, + 0x354806, + 0x232c84, + 0x236346, + 0x213186, + 0x25aa03, + 0x5a694b4a, + 0x2853c5, + 0x29b583, + 0x20f546, + 0x5aa0f549, + 0x22c4c7, + 0x3c8c08, + 0x3ce749, + 0x2b6048, + 0x209146, + 0x207cc3, + 0x5af1de42, + 0x32bd88, + 0x5b256e02, + 0x5b601582, + 0x233243, + 0x2efe85, + 0x280f44, + 0x3e27c9, + 0x386e04, + 0x38d188, + 0x5be10fc3, + 0x5c3a2ac4, + 0x21f448, + 0x5c70df02, + 0x2cf1c2, + 0x330cc5, + 0x34af09, + 0x216443, + 0x31b884, + 0x36e504, + 0x20b683, + 0x28bf8a, + 0x5cb0f082, + 0x5ce13fc2, + 0x2dc903, + 0x3939c3, + 0x1609382, + 0x35c243, + 0x5d228882, + 0x5d600bc2, + 0x5da8d4c4, + 0x28d4c6, + 0x276e84, + 0x283103, + 0x28f583, + 0x5de00bc3, + 0x248e46, + 0x3de805, + 0x2e0947, + 0x2e0886, + 0x2e0e48, + 0x2e1046, + 0x2239c4, + 0x2a6a8b, + 0x2e30c3, + 0x2e30c5, + 0x2165c2, + 0x35fe82, + 0x5e24f102, + 0x5e603742, + 0x20a083, + 0x5ea77782, + 0x277783, + 0x2e4103, + 0x5f2093c2, + 0x5f6e8306, + 0x35e3c6, + 0x5fae8442, + 0x5fe12202, + 0x60233ec2, + 0x60ea9542, + 0x61345342, + 0x61602802, + 0x20b0c3, + 0x3da086, + 0x61a1b544, + 0x2b130a, + 0x3b1d46, + 0x285084, + 0x202703, + 0x62606c02, + 0x204cc2, + 0x26f843, + 0x62a296c3, + 0x3c5847, + 0x3b5f07, + 0x67e60847, + 0x341607, + 0x232403, + 0x23240a, + 0x257204, + 0x31e544, + 0x31e54a, + 0x24a905, + 0x6823a382, + 0x2583c3, + 0x68600602, + 0x257743, + 0x386cc3, + 0x68e00582, + 0x283984, + 0x202544, + 0x2032c5, + 0x3301c5, + 0x236e86, + 0x2fb4c6, + 0x6924ba82, + 0x69601cc2, + 0x2f97c5, + 0x35e0d2, + 0x298a06, + 0x291c43, + 0x2b4ac6, + 0x2cf8c5, + 0x1603442, + 0x71a056c2, + 0x341143, + 0x212bc3, + 0x29c403, + 0x71e01102, + 0x21e803, + 0x7222d4c2, + 0x201d03, + 0x3b1008, + 0x241c43, + 0x2b5406, + 0x3e3047, + 0x34dbc6, + 0x34dbcb, + 0x284fc7, + 0x33ee44, + 0x72a00c02, + 0x2c4b85, + 0x72e2f483, + 0x23b843, + 0x39fd45, + 0x348ec3, + 0x73748ec6, + 0x3e514a, + 0x2ade43, + 0x213a04, 0x2003c6, - 0x2b1fc6, - 0x67e3e083, - 0x273987, - 0x26a647, - 0x2a3e85, - 0x2b2346, - 0x21a903, - 0x6aa25fc3, - 0x6ae00a82, - 0x6b20e9c4, - 0x213b49, - 0x226685, - 0x266e44, - 0x35a3c8, - 0x241e85, - 0x6b642285, - 0x247e89, - 0x3b4d43, - 0x349604, - 0x6ba05b42, - 0x216a43, - 0x6be75c42, - 0x275c46, - 0x167ce82, - 0x6c20c182, - 0x296448, - 0x29a743, - 0x2c5f87, - 0x384605, - 0x2be805, - 0x2be80b, - 0x2f0b06, - 0x2bea06, - 0x2804c4, - 0x211c86, - 0x6c6f1608, - 0x287403, - 0x25be43, - 0x25be44, - 0x2f0184, - 0x2f8747, - 0x318245, - 0x6cb20202, - 0x6ce04fc2, - 0x6d604fc5, - 0x2c6a84, - 
0x2f114b, - 0x2f9188, - 0x306444, - 0x6da2c8c2, - 0x6de2d782, - 0x3c2f03, - 0x2faf84, - 0x2fb245, - 0x2fbd47, - 0x6e2fe604, - 0x390ec4, - 0x6e616982, - 0x380fc9, - 0x2ffa45, - 0x243bc5, - 0x3005c5, - 0x6ea16983, - 0x237e84, - 0x237e8b, - 0x3010c4, - 0x30138b, - 0x301f05, - 0x217bca, - 0x303dc8, - 0x303fca, - 0x304883, - 0x30488a, - 0x6f213982, - 0x6f642c42, - 0x6fa0d403, - 0x6fede302, - 0x307643, - 0x702f8442, - 0x70739c42, - 0x308544, - 0x218386, - 0x3350c5, - 0x30c343, - 0x32fc06, - 0x3a0645, - 0x366b44, - 0x70a00902, - 0x2ae704, - 0x2d810a, - 0x2c0587, - 0x34ad46, - 0x235407, - 0x23e883, - 0x2c92c8, - 0x3dc44b, - 0x2ce445, - 0x223585, - 0x223586, - 0x342604, - 0x3cd748, - 0x2198c3, - 0x28b144, - 0x3cdc87, - 0x2f6b46, - 0x314a06, - 0x2c86ca, - 0x24d544, - 0x3214ca, - 0x70f5ccc6, - 0x35ccc7, - 0x255d87, - 0x2ab784, - 0x34c349, - 0x238cc5, - 0x2f8343, - 0x2201c3, - 0x7121b843, - 0x231704, - 0x71600682, - 0x266886, - 0x71acbc45, - 0x31c385, - 0x2505c6, - 0x2a6184, - 0x71e02b02, - 0x2421c4, - 0x7220d782, - 0x20d785, - 0x37d504, - 0x7361a6c3, - 0x73a08382, - 0x208383, - 0x34d886, - 0x73e07742, - 0x399508, - 0x223e44, - 0x223e46, - 0x396906, - 0x74257784, - 0x217e05, - 0x368548, - 0x265c07, - 0x2b1087, - 0x2b108f, - 0x2980c6, - 0x23c0c3, - 0x23db04, - 0x219b43, - 0x222ac4, - 0x24c404, - 0x74606c82, - 0x2bef83, - 0x337143, - 0x74a08502, - 0x20cec3, - 0x30be83, - 0x21270a, - 0x279407, - 0x25070c, - 0x74e509c6, - 0x250b46, - 0x253b87, - 0x752302c7, - 0x259009, - 0x75666444, - 0x75a0a1c2, - 0x75e02442, - 0x2c8a86, - 0x273784, - 0x2bf406, - 0x230748, - 0x3926c4, - 0x2f7a46, - 0x2ada45, - 0x7628dc88, - 0x2424c3, - 0x292005, - 0x3ab143, - 0x243cc3, - 0x243cc4, - 0x21d2c3, - 0x7664b642, - 0x76a04782, - 0x2f8209, - 0x293a05, - 0x293d84, - 0x294545, - 0x210f44, - 0x28eec7, - 0x35ff05, - 0x772ddf84, - 0x2ddf88, - 0x2df1c6, - 0x2e5144, - 0x2e8988, - 0x2e8fc7, - 0x7760ab02, - 0x2f1004, - 0x219c04, - 0x2ceb87, - 0x77a0ab04, - 0x2670c2, - 0x77e0ee42, - 0x20ee43, - 0x248884, - 0x29a503, - 0x2b7085, - 0x78201442, - 0x308885, - 0x23cac2, - 0x312645, - 0x23cac5, - 0x786010c2, - 0x316a04, - 0x78a018c2, - 0x349086, - 0x25ab46, - 0x385648, - 0x2cf888, - 0x314c84, - 0x35a585, - 0x310489, - 0x39b604, - 0x3d0444, - 0x2132c3, - 0x237c83, - 0x78f1fb05, - 0x24fd85, - 0x28b044, - 0x35eacd, - 0x25cdc2, - 0x366543, - 0x79201702, - 0x79600ec2, - 0x398fc5, - 0x341947, - 0x227344, - 0x3c86c9, - 0x2d8249, - 0x25fc83, - 0x27ccc8, - 0x35d1c9, - 0x220f47, - 0x79b7b845, - 0x39d086, - 0x3a7d46, - 0x3ac645, - 0x2ee345, - 0x79e06242, - 0x28db85, - 0x2c4b48, - 0x2d1686, - 0x7a22aa87, - 0x2d1ec4, - 0x2d1447, - 0x30d006, - 0x7a603c02, - 0x39d206, - 0x311cca, - 0x312545, - 0x7aa30ac2, - 0x7ae92ec2, - 0x36c7c6, - 0x7b292ec7, - 0x7b60d982, - 0x242c83, - 0x3c75c6, - 0x2d0744, - 0x33ec86, - 0x24eac6, - 0x20290a, - 0x359945, - 0x35c986, - 0x38a183, - 0x38a184, - 0x7ba1cc42, - 0x28f183, - 0x7be1e202, - 0x2fccc3, - 0x7c215504, - 0x20de04, - 0x7c60de0a, - 0x219243, - 0x239747, - 0x315146, - 0x3670c4, - 0x281142, - 0x2ac982, - 0x7ca007c2, - 0x22b3c3, - 0x255b47, + 0x327f06, + 0x73a0cb83, + 0x20cb87, + 0x326807, + 0x2a8485, + 0x239706, + 0x217303, + 0x76626a03, + 0x76a00a82, + 0x76ec8044, + 0x2114c9, + 0x22f7c5, + 0x361cc4, + 0x31e288, + 0x24ac45, + 0x7724ccc5, + 0x255849, + 0x32e683, + 0x23d7c4, + 0x77608402, + 0x21f783, + 0x77a96dc2, + 0x296dc6, + 0x169a902, + 0x77e15982, + 0x2a3b08, + 0x2b3743, + 0x2c5a87, + 0x2c1b85, + 0x2c5645, + 0x34de4b, + 0x2f17c6, + 0x34e046, + 0x277304, + 0x219d06, + 0x782f1e48, + 0x28e543, + 0x265043, + 0x265044, + 
0x2fa884, + 0x309447, + 0x3da945, + 0x786f8842, + 0x78a059c2, + 0x792059c5, + 0x2ca784, + 0x2fa9cb, + 0x2fd488, + 0x24bd04, + 0x796376c2, + 0x79a06bc2, + 0x206bc3, + 0x2ff644, + 0x2ff905, + 0x300487, + 0x79f02ac4, + 0x38bfc4, + 0x7a2037c2, + 0x37e5c9, + 0x303fc5, + 0x24d905, + 0x304b45, + 0x7a61f6c3, + 0x240644, + 0x24064b, + 0x305b04, + 0x305dcb, + 0x306745, + 0x22030a, + 0x307108, + 0x30730a, + 0x307b83, + 0x307b8a, + 0x7ae1a782, + 0x7b24cec2, + 0x7b604683, + 0x7bad3b02, + 0x309ec3, + 0x7bef57c2, + 0x7c33a842, + 0x30a904, + 0x2210c6, + 0x236085, + 0x30ccc3, + 0x3ce106, + 0x219045, + 0x35a504, + 0x7c600902, + 0x2b4004, + 0x2dcb8a, + 0x2c3687, + 0x349246, + 0x25d607, + 0x248783, + 0x2cd9c8, + 0x3e7ccb, + 0x221e45, + 0x36e645, + 0x36e646, + 0x2f8384, + 0x3df448, + 0x205703, + 0x2075c4, + 0x2075c7, + 0x33ea86, + 0x3a2e06, + 0x2ccdca, + 0x256b84, + 0x2c244a, + 0x7ca08dc6, + 0x208dc7, + 0x261947, + 0x266584, + 0x266589, + 0x336705, + 0x2f9c43, + 0x22a543, + 0x7ce264c3, + 0x23a044, + 0x7d200682, + 0x3d8986, + 0x7d6d05c5, + 0x2b4d05, + 0x25b746, + 0x31d704, + 0x7da12742, + 0x24b2c4, + 0x7de04a02, + 0x20c2c5, + 0x336884, + 0x7f22ccc3, + 0x7f609742, + 0x209743, + 0x21e946, + 0x7fa01ec2, + 0x397488, + 0x22c344, + 0x22c346, + 0x394246, + 0x7fe63084, + 0x21a7c5, + 0x22ef08, + 0x231dc7, + 0x326fc7, + 0x326fcf, + 0x29e286, + 0x23cc03, + 0x241684, + 0x20f843, + 0x22bbc4, + 0x252e44, + 0x80207f02, + 0x3747c3, + 0x337cc3, + 0x80602b02, + 0x204143, + 0x37d083, + 0x21878a, + 0x27eb47, + 0x258ecc, + 0x80a59186, + 0x25abc6, + 0x25bcc7, + 0x80e38747, + 0x262389, + 0x812fc684, + 0x8160a0c2, + 0x81a01702, + 0x2cd186, + 0x20c984, + 0x39e1c6, + 0x267ec8, + 0x3a5204, + 0x2f8ec6, + 0x2b2945, + 0x81e7c4c8, + 0x24c743, + 0x28a485, + 0x35d1c3, + 0x24da03, + 0x24da04, + 0x21dc43, + 0x82254642, + 0x826014c2, + 0x2f9b09, + 0x296cc5, + 0x3d4744, + 0x3e5745, + 0x20f244, + 0x37b3c7, + 0x338685, + 0x82ed1984, + 0x2d1988, + 0x2dd986, + 0x2e1dc4, + 0x2e1fc8, + 0x83204ac2, + 0x2f0d84, + 0x20f904, + 0x2d38c7, + 0x83605fc4, + 0x2171c2, + 0x83a0b5c2, + 0x20b5c3, + 0x2875c4, + 0x2512c3, + 0x2ba385, + 0x83e35542, + 0x30ac45, + 0x279c42, + 0x311f85, + 0x2db805, + 0x842010c2, + 0x332f04, + 0x84602d82, + 0x30dd46, + 0x2192c6, + 0x34b048, + 0x2d49c8, + 0x31e8c4, + 0x301805, + 0x2c0d09, + 0x2d2a44, + 0x3e5104, + 0x21f203, + 0x207383, + 0x84a07385, + 0x26fac5, + 0x269544, + 0x337d4d, + 0x352902, + 0x352903, + 0x84e04102, + 0x85200ec2, + 0x396f45, + 0x354c47, + 0x22d144, + 0x3ce949, + 0x2dccc9, + 0x282303, + 0x282308, + 0x246809, + 0x227d47, + 0x85755b45, + 0x3615c6, + 0x362786, + 0x365cc5, + 0x2cbf45, + 0x85a01c42, + 0x2930c5, + 0x2c9448, + 0x2d6a06, + 0x85ed7247, + 0x306984, + 0x2b9ac7, + 0x3b9106, + 0x8624b302, + 0x208986, + 0x31160a, + 0x311e85, + 0x86615a82, + 0x86a14442, + 0x278b86, + 0x86e97d07, + 0x8720c4c2, + 0x20a803, + 0x2250c6, + 0x2d5884, + 0x27ac86, + 0x32fa86, + 0x3a32ca, + 0x32e805, + 0x30d8c6, + 0x36c343, + 0x36c344, + 0x87603bc2, + 0x321303, + 0x87a1b2c2, + 0x31fec3, + 0x87e34c04, + 0x2d8284, + 0x883e380a, + 0x209203, + 0x326ac7, + 0x315106, + 0x38fa84, + 0x236d42, + 0x2b0982, + 0x886007c2, + 0x232a43, + 0x261707, 0x2007c7, - 0x28e544, - 0x3e2587, - 0x2fbe46, - 0x20f307, - 0x30bdc4, - 0x2e5d45, - 0x218ac5, - 0x7ce05682, - 0x216f86, - 0x227043, - 0x227ec2, - 0x227ec6, - 0x7d21c882, - 0x7d62dc42, - 0x238f85, - 0x7da03d02, - 0x7de02a82, - 0x353545, - 0x2d9845, - 0x2af105, - 0x7e65aa03, - 0x279185, - 0x2f0bc7, - 0x2b7945, - 0x359b05, - 0x268b84, - 0x266cc6, - 0x3944c4, - 0x7ea008c2, - 0x7f798885, - 0x3d0907, - 0x3a09c8, - 
0x269f86, - 0x269f8d, - 0x26f7c9, - 0x26f7d2, - 0x34d185, - 0x380843, - 0x7fa03b42, - 0x31f9c4, - 0x22db83, - 0x393e85, - 0x313785, - 0x7fe1fcc2, - 0x259fc3, - 0x8022b302, - 0x80a1cac2, - 0x80e00082, - 0x2ec2c5, - 0x213fc3, - 0x81208f02, - 0x81604642, - 0x3c1706, - 0x27e1ca, - 0x20c6c3, - 0x257c83, - 0x2f7343, - 0x832072c2, - 0x9161f702, - 0x91e07ac2, - 0x2034c2, - 0x3d3d09, - 0x2d4584, - 0x2e1c88, - 0x92305102, - 0x92a01502, - 0x2c2285, - 0x234e88, - 0x2f65c8, - 0x2fb70c, - 0x239683, - 0x92e13f42, - 0x9320e482, - 0x2bce06, - 0x315fc5, - 0x2e5583, - 0x247cc6, - 0x316106, - 0x253383, - 0x317803, - 0x317c46, - 0x319484, - 0x26aa06, - 0x236444, - 0x319b44, - 0x31ad0a, - 0x936bb102, - 0x24e605, - 0x31c58a, - 0x31c4c5, - 0x31e504, - 0x31e606, - 0x31e784, - 0x216d06, - 0x93a03c42, - 0x2ecf86, - 0x358f85, - 0x35c807, - 0x3c7386, - 0x253d84, - 0x2e5807, - 0x21dfc5, - 0x21dfc7, - 0x3c3a87, - 0x3c3a8e, - 0x280bc6, - 0x2bda05, - 0x20aa47, - 0x20e243, - 0x20e247, - 0x228f05, - 0x22bfc4, - 0x368842, - 0x32a1c7, - 0x241184, - 0x32a684, - 0x3ab1cb, - 0x21ab83, - 0x2dd0c7, - 0x21ab84, - 0x2dd3c7, - 0x3ae243, - 0x34f8cd, - 0x3aa588, - 0x93e45f84, - 0x366dc5, - 0x31f345, - 0x31f783, - 0x94223d42, - 0x322283, - 0x322b03, - 0x217104, - 0x283c85, - 0x224e87, - 0x38a206, - 0x393c43, - 0x22ad4b, - 0x322c8b, - 0x283d8b, - 0x2b32cb, - 0x2c718a, - 0x2d184b, - 0x2f1b4b, - 0x35ab4c, - 0x319f4b, - 0x374b91, - 0x39ad0a, - 0x3b794b, - 0x3c694c, - 0x3df28b, - 0x3256ca, - 0x325bca, - 0x326a4e, - 0x3271cb, - 0x32748a, - 0x328a51, - 0x328e8a, - 0x32938b, - 0x3298ce, - 0x32b70c, - 0x32c34b, - 0x32c60e, - 0x32c98c, - 0x32d6ca, - 0x32ee8c, - 0x9472f18a, - 0x32fd88, - 0x330949, - 0x33308a, - 0x33330a, - 0x33358b, - 0x3368ce, - 0x337751, - 0x341dc9, - 0x34200a, - 0x342b4b, - 0x34348d, - 0x34430a, - 0x3455d6, - 0x34694b, - 0x349e0a, - 0x34a38a, - 0x34b28b, - 0x34cc09, - 0x350249, - 0x3507cd, - 0x3510cb, - 0x352bcb, - 0x353689, - 0x353cce, - 0x35410a, - 0x35a04a, - 0x35a7ca, - 0x35b18b, - 0x35b9cb, - 0x35e2cd, - 0x35fa0d, - 0x360310, - 0x3607cb, - 0x36210c, - 0x36288b, - 0x36494b, - 0x36614e, - 0x36660b, - 0x36660d, - 0x36d70b, - 0x36e18f, - 0x36e54b, - 0x36f50a, - 0x36fb09, - 0x370089, - 0x94b7040b, - 0x3706ce, - 0x370a4e, - 0x3726cb, - 0x37374f, - 0x375fcb, - 0x37628b, - 0x37654a, - 0x37af49, - 0x37fa0f, - 0x3841cc, - 0x384bcc, - 0x385ece, - 0x38644f, - 0x38680e, - 0x3871d0, - 0x3875cf, - 0x3883ce, - 0x388f0c, - 0x389211, - 0x389652, - 0x38b3d1, - 0x38be8e, - 0x38c2cb, - 0x38c2ce, - 0x38c64f, - 0x38ca0e, - 0x38cd93, - 0x38d251, - 0x38d68c, - 0x38d98e, - 0x38de0c, - 0x38e353, - 0x38f1d0, - 0x3902cc, - 0x3905cc, - 0x390a8b, - 0x391bce, - 0x3920cb, - 0x392e4b, - 0x39418c, - 0x399a4a, - 0x39a50c, - 0x39a80c, - 0x39ab09, - 0x39d68b, - 0x39d948, - 0x39e649, - 0x39e64f, - 0x39ff0b, - 0x94fa0bca, - 0x3a268c, - 0x3a364b, - 0x3a3909, - 0x3a3cc8, - 0x3a458b, - 0x3a688a, - 0x3a6b0b, - 0x3a700c, - 0x3a77c9, - 0x3a7a08, - 0x3ab48b, - 0x3aeb8b, - 0x3b0d0e, - 0x3b244b, - 0x3b72cb, - 0x3c360b, - 0x3c38c9, - 0x3c3e0d, - 0x3d148a, - 0x3d4917, - 0x3d5618, - 0x3d8989, - 0x3d9ccb, - 0x3daad4, - 0x3dafcb, - 0x3db54a, - 0x3dbc0a, - 0x3dbe8b, - 0x3dd190, - 0x3dd591, - 0x3ddc4a, - 0x3de88d, - 0x3def8d, - 0x3e104b, - 0x217083, - 0x953b3583, - 0x2b0f46, - 0x27ca85, - 0x29c647, - 0x384906, - 0x1602342, - 0x2b3609, - 0x32fa04, - 0x2efcc8, - 0x21b783, - 0x31f907, - 0x230902, - 0x2b8f03, - 0x95603602, - 0x2d8d06, - 0x2da3c4, - 0x377084, - 0x201c43, - 0x95ed56c2, - 0x9622c344, - 0x34c287, - 0x9662bf82, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x23cb03, - 0x217fc3, 
- 0x23e083, - 0x106b48, - 0x205803, + 0x292704, + 0x258d47, + 0x300586, + 0x20ba87, + 0x230744, + 0x2b61c5, + 0x221c45, + 0x88a0d782, + 0x219e46, + 0x230bc3, + 0x29d6c2, + 0x2fc146, + 0x88e12682, + 0x89213402, + 0x213405, + 0x8962bdc2, + 0x89a02a02, + 0x351e45, + 0x2e3405, + 0x30a705, + 0x8a268b83, + 0x285845, + 0x2f1887, + 0x2b9385, + 0x32e9c5, + 0x257104, + 0x361b46, + 0x24e044, + 0x8a6008c2, + 0x8b2510c5, + 0x3967c7, + 0x213c08, + 0x27d046, + 0x27d04d, + 0x2803c9, + 0x2803d2, + 0x37e8c5, + 0x383403, + 0x8b6091c2, + 0x32f684, + 0x3a8f03, + 0x3d64c5, + 0x3136c5, + 0x8ba2a042, + 0x267d43, + 0x8be32982, + 0x8c629742, + 0x8ca00082, + 0x2ead45, + 0x39d643, + 0x8ce04942, + 0x8d206502, + 0x283946, + 0x2484ca, + 0x201c83, + 0x2638c3, + 0x2f2d43, + 0x8ee04a42, + 0x9d666342, + 0x9de0e002, + 0x205002, + 0x3d8109, + 0x2d9444, + 0x2e5488, + 0x9e308542, + 0x9ea017c2, + 0x393285, + 0x23d208, + 0x2f8088, + 0x30500c, + 0x241403, + 0x9ee6dac2, + 0x9f208e42, + 0x39dbc6, + 0x315f85, + 0x2e8c43, + 0x24cb06, + 0x3160c6, + 0x251403, + 0x317703, + 0x317dc6, + 0x319884, + 0x2a2446, + 0x34cd04, + 0x319f44, + 0x31aa0a, + 0x9f603582, + 0x257e85, + 0x31bfca, + 0x31bf05, + 0x31ce84, + 0x31cf86, + 0x31d104, + 0x21fa46, + 0x9fa16ec2, + 0x216ec6, + 0x271385, + 0x30d747, + 0x3c1346, + 0x25bec4, + 0x2e8ec7, + 0x2089c5, + 0x242c07, + 0x228947, + 0x22894e, + 0x2849c6, + 0x2b6dc5, + 0x205f07, + 0x3c3947, + 0x212d85, + 0x229b84, + 0x3235c2, + 0x23d887, + 0x249e04, + 0x35a784, + 0x2cf04b, + 0x9fe246c3, + 0x301387, + 0x2246c4, + 0x301687, + 0x310883, + 0x34e54d, + 0x3ad188, + 0xa0233984, + 0x3e16c5, + 0x31f985, + 0x31fdc3, + 0xa0608f02, + 0x3212c3, + 0x321a83, + 0x215ac4, + 0x289085, + 0x219fc7, + 0x36c3c6, + 0x390a43, + 0x233f0b, + 0x35be8b, + 0x2b504b, + 0x2cae8b, + 0x3991ca, + 0x2d6bcb, + 0x2f228b, + 0x32178c, + 0x31a34b, + 0x370491, + 0x398e4a, + 0x3b8a4b, + 0x3c95cc, + 0x3e6f4b, + 0x3230ca, + 0x323f4a, + 0x324dce, + 0x325a4b, + 0x325d0a, + 0x328911, + 0x328d4a, + 0x32924b, + 0x32978e, + 0x32a14c, + 0x32ae8b, + 0x32b14e, + 0x32b4cc, + 0x32ef0a, + 0x33068c, + 0xa0b3098a, + 0x331288, + 0x331e49, + 0x3348ca, + 0x334b4a, + 0x334dcb, + 0x33744e, + 0x338091, + 0x341cc9, + 0x341f0a, + 0x342c8b, + 0x343dcd, + 0x344c4a, + 0x345616, + 0x34698b, + 0x34844a, + 0x34888a, + 0x34a48b, + 0x34b2c9, + 0x34eec9, + 0x34f44d, + 0x34fc0b, + 0x3514cb, + 0x351f89, + 0x3525ce, + 0x3529ca, + 0x3550ca, + 0x35590a, + 0x3562cb, + 0x356b0b, + 0x35798d, + 0x359fcd, + 0x35a910, + 0x35adcb, + 0x35bacc, + 0x35cc8b, + 0x35f68b, + 0x3611ce, + 0x3617cb, + 0x3617cd, + 0x36740b, + 0x367e8f, + 0x36824b, + 0x36918a, + 0x369f49, + 0x36ab49, + 0xa0f6aecb, + 0x36b18e, + 0x36b50e, + 0x36e28b, + 0x36f04f, + 0x3718cb, + 0x371b8b, + 0x371e4a, + 0x376d89, + 0x37c74f, + 0x381d4c, + 0x38298c, + 0x3830ce, + 0x3835cf, + 0x38398e, + 0x383e10, + 0x38420f, + 0x384bce, + 0x38528c, + 0x385591, + 0x3859d2, + 0x387891, + 0x387ece, + 0x38830b, + 0x38830e, + 0x38868f, + 0x388a4e, + 0x388dd3, + 0x389291, + 0x3896cc, + 0x3899ce, + 0x389e4c, + 0x38a293, + 0x38af50, + 0x38b3cc, + 0x38b6cc, + 0x38bb8b, + 0x38e58e, + 0x38ea8b, + 0x38f2cb, + 0x39150c, + 0x3979ca, + 0x39864c, + 0x39894c, + 0x398c49, + 0x39ac8b, + 0x39af48, + 0x39b509, + 0x39b50f, + 0x39cf4b, + 0xa139e64a, + 0x3a3a0c, + 0x3a49cb, + 0x3a4c89, + 0x3a56c8, + 0x3a630b, + 0x3a810a, + 0x3a838b, + 0x3a9b0c, + 0x3aa649, + 0x3aa888, + 0x3ad7cb, + 0x3b0a4b, + 0x3b2e0e, + 0x3b494b, + 0x3b83cb, + 0x3c420b, + 0x3c44c9, + 0x3c488d, + 0x3d57ca, + 0x3d9857, + 0x3da218, + 0x3dc0c9, + 0x3de3cb, + 0x3df714, + 0x3dfc0b, + 0x3e018a, + 0x3e2a0a, + 
0x3e2c8b, + 0x3e4810, + 0x3e4c11, + 0x3e5a4a, + 0x3e654d, + 0x3e6c4d, + 0x3e940b, + 0x219f43, + 0xa17b5883, + 0x3cc686, + 0x3df0c5, + 0x27a587, + 0x2ddec6, + 0x164bf82, + 0x2729c9, + 0x20c004, + 0x2f0788, + 0x226403, + 0x32f5c7, + 0x247f82, + 0x2bbdc3, + 0xa1a0e042, + 0x2dd846, + 0x2defc4, + 0x2c8404, + 0x3a0f43, + 0xa22da842, + 0xa262f444, + 0x2664c7, + 0xa2a35b02, + 0x216543, + 0x222bc3, + 0x343b43, + 0x216443, + 0x2296c3, + 0x20cb83, + 0x117bc8, + 0x20d903, 0x2000c2, - 0xae888, - 0x212402, - 0x266a83, - 0x23cb03, - 0x217fc3, - 0x5803, - 0x23e083, - 0x208503, - 0x33cb96, - 0x36c093, - 0x3e2409, - 0x215d88, - 0x2c1389, - 0x31c706, - 0x3520d0, - 0x212113, - 0x2f6c08, - 0x282247, - 0x28d487, - 0x2aaa8a, - 0x36a609, - 0x3573c9, - 0x24cd4b, - 0x34b706, - 0x32ce4a, - 0x221106, - 0x32f603, - 0x2e4d05, - 0x20f4c8, - 0x28598d, - 0x2f45cc, - 0x3033c7, - 0x30e60d, - 0x215e84, - 0x2319ca, - 0x23248a, - 0x23294a, - 0x212407, - 0x23ce87, - 0x2410c4, - 0x269c06, - 0x35d584, - 0x305988, - 0x3c0509, - 0x2e9f06, - 0x2e9f08, - 0x24400d, - 0x2d8489, - 0x397c88, - 0x243b47, - 0x33230a, - 0x251186, - 0x2ff544, - 0x225c07, - 0x266a8a, - 0x23fb8e, - 0x246145, - 0x3dd98b, - 0x22b109, - 0x247109, - 0x205447, - 0x20544a, - 0x2ceac7, - 0x301949, - 0x347c88, - 0x33284b, - 0x2ee705, - 0x22c90a, - 0x265dc9, - 0x3568ca, - 0x21b8cb, - 0x225b0b, - 0x24cad5, - 0x2ce085, - 0x243bc5, - 0x237e8a, - 0x2527ca, - 0x321a07, - 0x234fc3, - 0x2c8a08, - 0x2e32ca, - 0x223e46, - 0x256689, - 0x28dc88, - 0x2e5144, - 0x38e109, - 0x2cf888, - 0x2d05c7, - 0x398886, - 0x3d0907, - 0x2c51c7, - 0x2401c5, - 0x245f8c, - 0x366dc5, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x5803, - 0x23e083, - 0x212402, - 0x22ea43, - 0x217fc3, - 0x205803, - 0x23e083, - 0x22ea43, - 0x217fc3, - 0x5803, - 0x269543, - 0x23e083, - 0x1d1843, - 0xae888, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x23cb03, - 0x217fc3, - 0x5803, - 0x23e083, - 0xae888, - 0x212402, - 0x22ea43, - 0x22ea47, - 0x8ecc4, - 0x217fc3, - 0x1b5c04, - 0x23e083, - 0x212402, - 0x204542, - 0x2f6e82, - 0x2022c2, - 0x202582, - 0x2f2402, - 0x96206, - 0x51709, - 0xe9bc7, - 0x481a6c3, - 0x8e8c7, - 0x154546, - 0xaa43, - 0x11eec5, + 0x793c8, + 0x216542, + 0x343b43, + 0x216443, + 0x2296c3, + 0xd903, + 0x20cb83, + 0x202b03, + 0x33d956, + 0x365753, + 0x258bc9, + 0x2b0e88, + 0x2c4a09, + 0x31c146, + 0x3509d0, + 0x218053, + 0x33eb48, + 0x285c87, + 0x2929c7, + 0x2aebca, + 0x363749, + 0x333bc9, + 0x25dd0b, + 0x34a906, + 0x32b98a, + 0x22ac46, + 0x238c43, + 0x231805, + 0x206c48, + 0x28b04d, + 0x35434c, + 0x271047, + 0x309f4d, + 0x22f004, + 0x23a30a, + 0x23b0ca, + 0x23b58a, + 0x218347, + 0x2461c7, + 0x249d44, + 0x279ec6, + 0x34abc4, + 0x222bc8, + 0x386e49, + 0x209a46, + 0x308cc8, + 0x24dd4d, + 0x2dcf09, + 0x318348, + 0x24d887, + 0x21364a, + 0x25bb46, + 0x34bbc4, + 0x2298c7, + 0x3d8b8a, + 0x242f8e, + 0x2823c5, + 0x29788b, + 0x232789, + 0x280609, + 0x20d547, + 0x20d54a, + 0x2d3807, + 0x306389, + 0x37b048, + 0x37948b, + 0x2efe85, + 0x23770a, + 0x233c89, + 0x33324a, + 0x22654b, + 0x2297cb, + 0x25da95, + 0x2f0c45, + 0x24d905, + 0x24064a, + 0x26ba4a, + 0x390f47, + 0x23d343, + 0x2cd108, + 0x2e640a, + 0x22c346, + 0x261289, + 0x27c4c8, + 0x2e1dc4, + 0x2512c9, + 0x2d49c8, + 0x2d5707, + 0x2510c6, + 0x3967c7, + 0x399b07, + 0x248c45, + 0x37500c, + 0x3e16c5, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0xd903, + 0x20cb83, + 0x216542, + 0x216543, + 0x2296c3, + 0x20d903, + 0x20cb83, + 0x216543, + 0x2296c3, + 0xd903, + 0x241c43, + 0x20cb83, + 0x1d5b83, + 0x793c8, + 0x216543, + 0x222bc3, + 0x343b43, + 0x216443, + 0x2296c3, + 
0xd903, + 0x20cb83, + 0x793c8, + 0x216542, + 0x216543, + 0x3a8607, + 0x17b1c4, + 0x2296c3, + 0xbbc4, + 0x20cb83, + 0x19045, + 0x216542, + 0x2104c2, + 0x31d0c2, + 0x206002, + 0x205c02, + 0x2160c2, + 0x9a6c6, + 0x5c549, + 0x182487, + 0x1550e, + 0x99049, + 0x482ccc3, + 0x95c87, + 0x152e06, + 0x1643, + 0x11e505, 0xc1, - 0x522ea43, - 0x233fc3, - 0x280203, - 0x266a83, - 0x2191c3, - 0x23cb03, - 0x2e4c06, - 0x217fc3, - 0x23e083, - 0x234f43, - 0xae888, - 0x3b46c4, - 0x324547, - 0x201c83, - 0x39e284, - 0x2052c3, - 0x2054c3, - 0x266a83, - 0x178d87, + 0x5216543, + 0x222bc3, + 0x2f5503, + 0x343b43, + 0x243543, + 0x216443, + 0x2e8706, + 0x2296c3, + 0x20cb83, + 0x202883, + 0x793c8, + 0x209b84, + 0x3a2887, + 0x3a0f83, + 0x25e704, + 0x20d3c3, + 0x20d5c3, + 0x343b43, + 0xb46c7, 0x9c4, - 0x157bc3, - 0x2105, + 0x12db83, + 0x10e645, 0x66000c2, - 0x4ac43, - 0x6a12402, - 0x6e8b749, - 0x7091e09, - 0x923cd, - 0x9270d, - 0x2f6e82, - 0xe704, - 0x2149, + 0x53c43, + 0x6a16542, + 0x6e90b89, + 0x7096ac9, + 0x96f4d, + 0x9728d, + 0x31d0c2, + 0xb1b84, + 0x10e689, 0x2003c2, - 0x7623188, - 0x100ac4, - 0x320c03, - 0xae888, - 0x41184, - 0x140ea82, + 0x76b1a88, + 0x105504, + 0x320b43, + 0x793c8, + 0x49e04, + 0x1407242, 0x14005c2, - 0x140ea82, - 0x1519d46, - 0x230983, - 0x276243, - 0x7e2ea43, - 0x2319c4, - 0x8233fc3, - 0x8a66a83, - 0x209582, - 0x20e704, - 0x217fc3, - 0x3319c3, - 0x209282, - 0x23e083, - 0x2188c2, - 0x308483, - 0x207742, - 0x203b83, - 0x222403, - 0x207d02, - 0xae888, - 0x230983, - 0x210448, - 0x87319c3, - 0x209282, - 0x308483, - 0x207742, - 0x203b83, - 0x222403, - 0x207d02, - 0x2509c7, - 0x308483, - 0x207742, - 0x203b83, - 0x222403, - 0x207d02, - 0x22ea43, - 0x6c02, - 0xf4c3, - 0x31c2, - 0x293c2, - 0x4d82, - 0x8c82, - 0x72c2, - 0x43d42, - 0x24ac43, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x20e704, - 0x2191c3, - 0x23cb03, - 0x21e484, - 0x217fc3, - 0x23e083, - 0x201b02, - 0x216983, - 0xae888, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x23cb03, - 0x217fc3, - 0x23e083, - 0x24ac43, - 0x212402, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x20e704, - 0x217fc3, - 0x23e083, - 0x37b845, - 0x21fcc2, + 0x1407242, + 0x151a146, + 0x23bb83, + 0x2cc803, + 0x7e16543, + 0x23a304, + 0x8622bc3, + 0x8f43b43, + 0x2042c2, + 0x2b1b84, + 0x2296c3, + 0x38c643, + 0x203c82, + 0x20cb83, + 0x221a42, + 0x30a303, + 0x201ec2, + 0x26a603, + 0x220b03, + 0x2089c2, + 0x793c8, + 0x82fdcc9, + 0x27b43, + 0x23bb83, + 0x20b2c8, + 0x8b8c643, + 0x203c82, + 0x30a303, + 0x201ec2, + 0x26a603, + 0x220b03, + 0x2089c2, + 0x259187, + 0x30a303, + 0x201ec2, + 0x26a603, + 0x220b03, + 0x2089c2, + 0x216543, + 0x4702, + 0x6c43, + 0x2bc2, + 0x13242, + 0xe8c2, + 0x11de42, + 0x4a42, + 0x4da82, + 0x253c43, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2b1b84, + 0x243543, + 0x216443, + 0x21b544, + 0x2296c3, + 0x20cb83, + 0x204642, + 0x21f6c3, + 0x793c8, + 0x216543, + 0x222bc3, + 0x343b43, + 0x216443, + 0x2296c3, + 0x20cb83, + 0x8503, + 0x2d4c2, + 0x253c43, + 0x216542, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2b1b84, + 0x2296c3, + 0x20cb83, + 0x355b45, + 0x22a042, 0x2000c2, - 0xae888, - 0x1454408, - 0x7b64a, - 0x266a83, - 0x202881, + 0x793c8, + 0xaec0ad2, + 0x1472588, + 0x1b2b8a, + 0x3ec5, + 0x343b43, + 0x230d41, 0x2009c1, 0x200a01, - 0x201781, - 0x202101, - 0x20bac1, - 0x201d01, - 0x203001, - 0x230d41, + 0x202c41, + 0x201b41, + 0x211101, + 0x209c01, + 0x230e41, + 0x2fd181, 0x200001, 0x2000c1, 0x200201, - 0x146bc5, - 0xae888, + 0x146c05, + 0x793c8, 0x200101, 0x201381, 0x200501, @@ -2344,7257 +2355,7413 @@ var nodes = [...]uint32{ 0x200581, 0x2003c1, 0x200a81, - 0x20c241, + 0x2210c1, 0x200401, 
0x200741, 0x2007c1, 0x200081, - 0x201501, - 0x207d01, - 0x20a8c1, - 0x202341, - 0x201c41, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x23e083, - 0x212402, - 0x22ea43, - 0x233fc3, + 0x2017c1, + 0x201641, + 0x207281, + 0x2024c1, + 0x208481, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x20cb83, + 0x216542, + 0x216543, + 0x222bc3, 0x2003c2, - 0x23e083, - 0x1a083, - 0x178d87, - 0x7f3c7, - 0x36fc6, - 0x3a8ca, - 0x91248, - 0x54d88, - 0x55a47, - 0x6e8c6, - 0xec7c5, - 0x1b5a05, - 0x129783, - 0x13a06, - 0x134c46, - 0x24cd44, - 0x334907, - 0xae888, - 0x2e5904, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x23e083, - 0x12402, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x23e083, - 0x32f388, - 0x207d44, - 0x233f04, - 0x204cc4, - 0x2bcd07, - 0x2e21c7, - 0x22ea43, - 0x23670b, - 0x323dca, - 0x34b9c7, - 0x238548, - 0x354a88, - 0x233fc3, - 0x25e4c7, - 0x280203, - 0x211448, - 0x212e49, - 0x20e704, - 0x2191c3, - 0x23b948, - 0x23cb03, - 0x2dfb4a, - 0x2e4c06, - 0x3b0107, - 0x217fc3, - 0x323606, - 0x2760c8, - 0x23e083, - 0x257546, - 0x2f93cd, - 0x2fba08, - 0x3010cb, - 0x2b2946, - 0x341847, - 0x21ecc5, - 0x3da84a, - 0x22ac05, - 0x24fc8a, - 0x21fcc2, - 0x20aa43, - 0x32a684, + 0x20cb83, + 0x22a83, + 0xb46c7, + 0x1cdf07, + 0x32f46, + 0x4280a, + 0x95748, + 0x60c88, + 0x61607, + 0xbc3c4, + 0x15c246, + 0xeec85, + 0x10f7c5, + 0x129643, + 0x30846, + 0x13906, + 0x25dd04, + 0x336147, + 0x793c8, + 0x2e8fc4, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x20cb83, + 0x16542, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x20cb83, + 0x330b88, + 0x202304, + 0x23c4c4, + 0x20e804, + 0x39dac7, + 0x2e59c7, + 0x216543, + 0x23ec8b, + 0x33100a, + 0x38f947, + 0x300288, + 0x353348, + 0x222bc3, + 0x3c2e87, + 0x2f5503, + 0x214fc8, + 0x224309, + 0x2b1b84, + 0x243543, + 0x244688, + 0x216443, + 0x2e320a, + 0x2e8706, + 0x3b1d47, + 0x2296c3, + 0x2f1b46, + 0x3d2288, + 0x20cb83, + 0x275546, + 0x2fd6cd, + 0x2ffe48, + 0x305b0b, + 0x223bc6, + 0x354b47, + 0x21d985, + 0x22e68a, + 0x2fce05, + 0x26f9ca, + 0x22a042, + 0x201643, + 0x35a784, 0x200006, - 0x3ba683, - 0x2ae783, - 0x281bc3, - 0x207d43, - 0x323a43, - 0x2029c2, - 0x309b85, - 0x2b07c9, - 0x201ac3, - 0x240843, - 0x233f03, - 0x232283, + 0x3bb603, + 0x2b4083, + 0x28bb03, + 0x202303, + 0x37a403, + 0x202002, + 0x39d805, + 0x2b5949, + 0x209983, + 0x2492c3, + 0x203b43, + 0x216c43, 0x200201, - 0x39b3c7, - 0x2ec005, - 0x3c2003, - 0x2a4d43, - 0x3dff03, - 0x204cc4, - 0x356e43, - 0x227608, - 0x322bc3, - 0x310c4d, - 0x280c88, - 0x210606, - 0x28f1c3, - 0x366903, - 0x394443, - 0xce2ea43, - 0x233808, - 0x236704, - 0x23d3c3, - 0x241283, + 0x2d2807, + 0x2eaa85, + 0x3c1fc3, + 0x26b383, + 0x3e9683, + 0x20e804, + 0x3c3303, + 0x2271c8, + 0x35bdc3, + 0x3e1ecd, + 0x284a88, + 0x20b486, + 0x2e9443, + 0x35a2c3, + 0x361ac3, + 0xda16543, + 0x23bdc8, + 0x23ec84, + 0x247203, + 0x249f03, 0x200106, - 0x244e88, - 0x20f983, - 0x21fa43, - 0x2b6ec3, - 0x222383, - 0x3da883, - 0x22f203, - 0x233fc3, - 0x22d003, - 0x249203, - 0x24cbc3, - 0x28b003, - 0x28f143, - 0x20a003, + 0x24e888, + 0x266943, + 0x228fc3, + 0x2ba1c3, + 0x220a83, + 0x22e6c3, + 0x23a543, + 0x222bc3, + 0x22d743, + 0x255ec3, + 0x209a43, + 0x290583, + 0x325243, + 0x20ae83, + 0x232d43, + 0x3a4e85, + 0x25c504, + 0x25dfc7, + 0x25c082, + 0x260183, + 0x263c46, 0x265743, - 0x392345, - 0x2516c4, - 0x252a47, - 0x2ba882, - 0x254b03, - 0x258106, - 0x259243, - 0x259c43, - 0x27cc83, - 0x26f183, - 0x20b183, - 0x3b43c3, - 0x29d847, - 0xd266a83, - 0x2c3fc3, - 0x28f203, - 0x204903, - 0x20e703, - 0x2ed2c3, - 0x20e905, - 0x37fd83, - 0x24b709, + 0x266c03, + 
0x2822c3, + 0x35cb03, + 0x21fb43, + 0x32d103, + 0x2a1807, + 0xe743b43, + 0x2d3103, + 0x207c83, + 0x20e443, + 0x26a7c3, + 0x217203, + 0x3b5945, + 0x37cac3, + 0x252749, 0x2012c3, - 0x313a83, - 0xd63cb83, - 0x2d5e43, - 0x204d03, - 0x218bc8, - 0x2b0706, - 0x26ef46, - 0x2ba8c6, - 0x38f887, - 0x205e03, - 0x215f83, - 0x23cb03, - 0x291346, - 0x20e982, - 0x2b8a83, - 0x33b645, - 0x217fc3, - 0x31da07, - 0x1605803, - 0x2760c3, - 0x212483, - 0x232383, - 0x235fc3, - 0x23e083, - 0x21d506, - 0x3b5d46, - 0x380703, - 0x2fa583, - 0x216983, - 0x250983, - 0x317883, - 0x306d43, - 0x308843, - 0x3a0645, - 0x235403, - 0x3b2a86, - 0x221d43, - 0x27fc88, - 0x2201c3, - 0x2201c9, - 0x273288, - 0x221e48, - 0x225645, - 0x36000a, - 0x38e84a, - 0x22f98b, - 0x238108, - 0x294983, - 0x2f2a03, - 0x393d83, - 0x39fa83, - 0x316248, - 0x37a903, - 0x38a184, - 0x21cc42, - 0x20de03, - 0x260e03, + 0x3139c3, + 0xea53203, + 0x2daf43, + 0x20e843, + 0x214808, + 0x2b5886, + 0x35c8c6, + 0x2be186, + 0x267347, + 0x202143, + 0x233243, + 0x216443, + 0x295846, + 0x2165c2, + 0x2e69c3, + 0x33c405, + 0x2296c3, + 0x31c887, + 0x160d903, + 0x29ae43, + 0x2183c3, + 0x23c9c3, + 0x23b843, + 0x20cb83, + 0x21de86, + 0x202fc6, + 0x37db83, + 0x29a8c3, + 0x21f6c3, + 0x259143, + 0x317783, + 0x309603, + 0x30ac03, + 0x219045, + 0x24c343, + 0x250046, + 0x21b103, + 0x2f4f88, + 0x22a543, + 0x22a549, + 0x37ad08, + 0x220548, + 0x22eac5, + 0x38ac4a, + 0x3e08ca, + 0x3e110b, + 0x3e1ac8, + 0x2aa6c3, + 0x230143, + 0x390b83, + 0x2f34c3, + 0x312588, + 0x355303, + 0x36c344, + 0x203bc2, + 0x22e683, + 0x24c0c3, 0x2007c3, - 0x22dc43, - 0x27b143, - 0x234f43, - 0x21fcc2, - 0x22b8c3, - 0x239683, - 0x319ec3, - 0x31b744, - 0x32a684, - 0x21cb03, - 0xae888, + 0x3d8883, + 0x281003, + 0x202883, + 0x22a042, + 0x2d3703, + 0x241403, + 0x31a2c3, + 0x31b444, + 0x35a784, + 0x227083, + 0x793c8, + 0xdf1854c, + 0xe2ac245, + 0xbb705, 0x2000c2, 0x200ac2, - 0x2029c2, - 0x201802, + 0x202002, + 0x202cc2, 0x200202, - 0x205082, - 0x249382, - 0x2031c2, + 0x202402, + 0x250cc2, + 0x202bc2, 0x200382, 0x200c42, - 0x349242, - 0x20a942, - 0x2720c2, + 0x30df02, + 0x203742, + 0x277782, 0x200a82, - 0x2f2402, - 0x205b42, - 0x211c82, - 0x216982, - 0x206002, - 0x205502, + 0x2160c2, + 0x208402, + 0x219d02, + 0x2037c2, + 0x2cb042, + 0x205d42, 0x200682, - 0x2113c2, + 0x214f42, + 0x212742, 0x202b02, - 0x208502, - 0x202442, - 0x207142, - 0x202a82, + 0x201702, + 0x203782, + 0x202a02, 0xc2, 0xac2, - 0x29c2, - 0x1802, + 0x2002, + 0x2cc2, 0x202, - 0x5082, - 0x49382, - 0x31c2, + 0x2402, + 0x50cc2, + 0x2bc2, 0x382, 0xc42, - 0x149242, - 0xa942, - 0x720c2, + 0x10df02, + 0x3742, + 0x77782, 0xa82, - 0xf2402, - 0x5b42, - 0x11c82, - 0x16982, - 0x6002, - 0x5502, + 0x160c2, + 0x8402, + 0x19d02, + 0x37c2, + 0xcb042, + 0x5d42, 0x682, - 0x113c2, + 0x14f42, + 0x12742, 0x2b02, - 0x8502, - 0x2442, - 0x7142, - 0x2a82, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x23e083, - 0x83c2, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x23e083, - 0x12402, - 0x212402, - 0x23e083, - 0xee2ea43, - 0x266a83, - 0x23cb03, - 0x1c0443, - 0x230242, - 0xae888, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x5803, - 0x1c0443, - 0x23e083, - 0x3602, + 0x1702, + 0x3782, + 0x2a02, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x20cb83, + 0x1642, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x20cb83, + 0x16542, + 0x216542, + 0x20cb83, + 0x10216543, + 0x343b43, + 0x216443, + 0xeb2c7, + 0x7ca83, + 0x2386c2, + 0x793c8, + 0x216543, + 0x222bc3, + 0x343b43, + 0x30103, + 0x2296c3, + 0xd903, + 0x7ca83, + 0x20cb83, + 0xe042, 0x2001c2, - 0x1567b85, - 
0x146bc5, - 0x210402, - 0xae888, - 0x12402, - 0x2359c2, - 0x206b02, - 0x208142, - 0x20e742, - 0x23bec2, - 0x1b5a05, - 0x201402, - 0x209282, + 0x15ca1c5, + 0x146c05, + 0x20cd42, + 0x793c8, + 0x16542, + 0x23dec2, + 0x204202, + 0x202702, + 0x23a382, + 0x24ba82, + 0x10f7c5, + 0x201482, + 0x203c82, 0x201102, - 0x2053c2, - 0x205b42, - 0x2408c2, - 0x20ee42, - 0x256382, - 0xfe72cc4, + 0x203382, + 0x208402, + 0x2473c2, + 0x20b5c2, + 0x21c282, + 0x11278384, 0x142, - 0x178d87, - 0x30a83, - 0x12808d, - 0xec849, - 0x118a0b, - 0xf0a88, - 0x5bd09, - 0x1145c6, - 0x266a83, - 0xae888, + 0xb46c7, + 0x15a43, + 0x1b63cd, + 0xeed09, + 0xef94b, + 0xf1748, + 0x64f09, + 0x114786, + 0x343b43, + 0x793c8, 0x9c4, - 0x157bc3, - 0x2105, - 0xae888, - 0xe7607, - 0x1104d007, - 0x56546, - 0x2149, - 0xa28e, - 0x14ca47, - 0x150e583, + 0x12db83, + 0x10e645, + 0x793c8, + 0xebe47, + 0x12455f47, + 0x12a5f244, + 0x62246, + 0x10e689, + 0xb448e, + 0x13e247, + 0x15d8303, + 0x12e0ad42, + 0x9989, + 0xa144, 0x2000c2, - 0x24cd44, - 0x212402, - 0x22ea43, - 0x204542, - 0x233fc3, - 0xfa03, + 0x25dd04, + 0x216542, + 0x216543, + 0x2104c2, + 0x222bc3, + 0x1a003, 0x200382, - 0x2e5904, - 0x2191c3, - 0x206a02, - 0x217fc3, - 0x3bec2, + 0x2e8fc4, + 0x243543, + 0x256e02, + 0x2296c3, + 0x4ba82, 0x2003c2, - 0x23e083, - 0x243bc6, - 0x333b4f, + 0x20cb83, + 0x24d906, + 0x33538f, 0x602, - 0x72a143, - 0x2f3c0a, - 0xae888, - 0x212402, - 0x280203, - 0x266a83, - 0x23cb03, - 0x5803, - 0x1522f06, - 0x1c4104, - 0xa288, - 0x140dbcb, - 0x156c4ca, - 0xf3289, - 0x15da64a, - 0x1513f07, - 0xaab4b, - 0x10d4c5, - 0xf0545, - 0x11d749, - 0x146bc5, - 0x178d87, - 0x1c4104, - 0xfe2c4, - 0x212402, - 0x22ea43, - 0x266a83, - 0x217fc3, + 0x723543, + 0x2f5dca, + 0x793c8, + 0x216542, + 0x2f5503, + 0x343b43, + 0x216443, + 0xd903, + 0x147b5e07, + 0x157cd06, + 0x13f046, + 0x14bc4b88, + 0x1db944, + 0x14ebe40a, + 0x15abe40d, + 0xb4488, + 0x142e44b, + 0x147888a, + 0x15c66b43, + 0xf3949, + 0x16104b48, + 0x1664c347, + 0x15e360a, + 0x1513e47, + 0xaec8b, + 0x16a9068c, + 0xa5545, + 0xcf9c5, + 0x11c5c9, + 0x1a0c84, + 0x117703, + 0x152be545, + 0x124443, + 0x15635c43, + 0x124443, + 0x1d7607, + 0x2bdc2, + 0x6502, + 0x6502, + 0x4182, + 0x6502, + 0x4a42, + 0xd42, + 0x3242, + 0x146c05, + 0xb46c7, + 0x1db944, + 0x102784, + 0x216542, + 0x216543, + 0x343b43, + 0x2296c3, 0x2000c2, 0x200c82, - 0x205102, - 0x1362ea43, - 0x23d542, - 0x233fc3, + 0x206342, + 0x17a16543, + 0x247382, + 0x222bc3, 0x201282, - 0x208882, - 0x266a83, - 0x23ca82, - 0x27b882, - 0x22c302, + 0x234402, + 0x343b43, + 0x2038c2, + 0x271cc2, + 0x22f402, 0x200cc2, - 0x295f42, + 0x29a402, 0x200802, 0x200d82, - 0x25b542, - 0x2295c2, - 0x205742, - 0x13150c, - 0x2be4c2, - 0x250d42, - 0x227082, - 0x24a282, - 0x23cb03, + 0x205102, + 0x2870c2, + 0x2027c2, + 0x132a0c, + 0x2c1442, + 0x25adc2, + 0x230c02, + 0x253582, + 0x216443, 0x200bc2, - 0x217fc3, - 0x209ec2, - 0x25c042, - 0x23e083, - 0x3081c2, - 0x208502, - 0x20a1c2, - 0x204782, + 0x2296c3, + 0x20f502, + 0x298642, + 0x20cb83, + 0x249342, + 0x202b02, + 0x20a0c2, + 0x2014c2, 0x2010c2, - 0x230ac2, - 0x205682, - 0x22b302, - 0x2270c2, - 0x32748a, - 0x36f50a, - 0x3a124a, - 0x3e2d42, - 0x208902, - 0x20e8c2, - 0x13aa7f09, - 0x13f61e8a, - 0x142fc47, - 0x142050c2, - 0x143a083, - 0x1742, - 0x161e8a, - 0x162b0e, - 0x241ec4, - 0x57fc5, - 0x14a2ea43, - 0x3dc03, - 0x233fc3, - 0x24d704, - 0x266a83, - 0x20e704, - 0x2191c3, - 0x13d289, - 0x157686, - 0x23cb03, - 0xf1584, - 0x1598c3, - 0x217fc3, - 0x2a7c5, - 0x205803, - 0x23e083, - 0x1466d84, - 0x235403, - 0x181584, - 0x20aa43, - 0xae888, - 0x154f043, - 0x12a086, - 
0x146e844, - 0x1a45, - 0x14c80a, - 0x124d82, - 0x15408acd, - 0x1adec6, - 0x159a140b, - 0xc951, - 0x15ea7f09, - 0x1ac8, - 0x69908, - 0x1c9415c7, - 0x3502, - 0xa8087, - 0x221ce, - 0x146bcb, - 0x14a88b, - 0x1c008a, - 0x1683c7, - 0xae888, - 0x120d48, - 0xa807, - 0x1cc176cb, - 0x1a087, - 0xcfc2, - 0x2b20d, - 0x16a7c7, - 0xb1bca, - 0x1e174f, - 0x12308f, - 0x161e82, - 0x12402, - 0x8af48, - 0x1d10778c, - 0x1570a, - 0xe710a, - 0x19004a, - 0x80a88, - 0x1d208, - 0x5a488, - 0xe75c8, - 0x1388, - 0xf982, - 0x167c0f, - 0xc6d8b, - 0x10f508, - 0x35cc7, - 0x4878a, - 0xbc3cb, - 0x34449, - 0x48687, - 0x83986, - 0x1d108, - 0x18ea0c, - 0x161347, - 0x1ae40a, - 0xec88, - 0x10ae8e, - 0x10b64e, - 0x16820b, - 0x168a8b, - 0x658cb, - 0x66609, - 0x6754b, - 0xbd4cd, - 0xf548b, - 0xf5fcd, - 0xf634d, - 0x10360a, - 0x12a4cb, - 0x166c0b, - 0x3bfc5, - 0x1d58b810, - 0x13514f, - 0x72e8f, - 0x2470d, - 0x13d450, - 0x293c2, - 0x1da1f8c8, - 0x7f248, - 0xea790, - 0x17fe0e, - 0x1df22b85, - 0x4c84b, - 0x13c390, - 0x1d30a, - 0x168c49, - 0x680c7, - 0x68407, - 0x685c7, - 0x68947, - 0x69e07, - 0x6a2c7, - 0x6bb07, - 0x6c047, - 0x6d587, - 0x6d907, - 0x6dfc7, - 0x6e187, - 0x6e347, - 0x6e507, - 0x6f307, - 0x6fc47, - 0x70a87, - 0x70e47, - 0x71487, - 0x71747, - 0x71907, - 0x71c07, - 0x71f87, - 0x72187, - 0x748c7, - 0x74a87, - 0x74c47, - 0x75dc7, - 0x77207, - 0x776c7, - 0x77dc7, - 0x78087, - 0x78407, - 0x785c7, - 0x789c7, - 0x78e07, - 0x792c7, - 0x79847, - 0x79a07, - 0x79bc7, - 0x7a007, - 0x7aa87, - 0x7afc7, - 0x7b207, - 0x7b3c7, - 0x7bb87, - 0x7c187, - 0x9a42, - 0x5a58a, - 0x13808, - 0x1baf8c, - 0x4eb87, - 0x918c5, - 0x9b311, - 0x1bb46, - 0x104dca, - 0x8adca, - 0x56546, - 0xb3ecb, + 0x215a82, + 0x20d782, + 0x232982, + 0x22cec2, + 0x325d0a, + 0x36918a, + 0x39ecca, + 0x3e9b42, + 0x20cec2, + 0x2be702, + 0x17f8cc49, + 0x183bb68a, + 0x14380c7, + 0x18601682, + 0x1430483, + 0x2c02, + 0x1bb68a, + 0x14f0ce, + 0x21d684, + 0xe8805, + 0x18e16543, + 0x48383, + 0x222bc3, + 0x256d44, + 0x343b43, + 0x2b1b84, + 0x243543, + 0x13e049, + 0x133e86, + 0x216443, + 0xf1dc4, + 0x1b03, + 0x2296c3, + 0x149f05, + 0x20d903, + 0x20cb83, + 0x1561c04, + 0x24c343, + 0x114bc4, + 0x201643, + 0x793c8, + 0x154db43, + 0x123486, + 0x155c1c4, + 0x1a0d45, + 0x1a0a8a, + 0x130602, + 0x199a16cd, + 0x1b3dc6, + 0x147f11, + 0x19f8cc49, + 0x1a0dc8, + 0x42008, + 0x20869487, + 0x3b42, + 0x18cdc7, + 0x208ce, + 0x146c0b, + 0x148d8b, + 0x1c0dca, + 0x34347, + 0x793c8, + 0xb4188, + 0xfd87, + 0x20c1fe0b, + 0x22a87, + 0x4242, + 0x3288d, + 0x163907, + 0x127b0a, + 0x12510c, + 0x1252cf, + 0x1ca4cf, + 0x212eb34d, + 0x2e702, + 0x16542, + 0x904c8, + 0x214e91cc, + 0x1aab8a, + 0xeb94a, + 0x7d54a, + 0x84888, + 0x1db88, + 0x68608, + 0xebe08, + 0x17bbc8, + 0x3242, + 0x1ca24f, + 0xcaa8b, + 0x1dcf08, + 0x3e1c7, + 0x874ca, + 0x3aa4b, + 0x51b89, + 0x873c7, + 0x136f46, + 0x1da88, + 0x1e0a8c, + 0xf4547, + 0x31a0a, + 0x1c74c8, + 0x32f4e, + 0x3370e, + 0x3418b, + 0x3518b, + 0x3678b, + 0xfc849, + 0x880cb, + 0xb688d, + 0x158a8b, + 0xf7a8d, + 0xf7e0d, + 0x12378a, + 0x15a5cb, + 0x1e150b, + 0x3f545, + 0x219c4bd0, + 0x21c41a88, + 0x3610f, + 0x7854f, + 0x2254d, + 0x17a710, + 0x13242, + 0x22258908, + 0x1cdd88, + 0x1b0350, + 0x106a4e, + 0x2275bd85, + 0x555cb, + 0x13d150, + 0x1dc8a, + 0x35349, + 0x6ff47, + 0x70287, + 0x70447, + 0x71587, + 0x72407, + 0x72787, + 0x734c7, + 0x73a07, + 0x73f07, + 0x74287, + 0x74947, + 0x74b07, + 0x74cc7, + 0x74e87, + 0x75207, + 0x756c7, + 0x75ec7, + 0x76287, + 0x768c7, + 0x76b87, + 0x76d47, + 0x77047, + 0x77647, + 0x77847, + 0x78d07, + 0x78ec7, + 0x79087, + 0x79807, + 0x7a047, + 0x7a8c7, + 0x7d387, 
+ 0x7d7c7, + 0x7db47, + 0x7dd07, + 0x7e107, + 0x7e547, + 0x7ea07, + 0x7ef87, + 0x7f147, + 0x7f307, + 0x7f747, + 0x7fd07, + 0x80247, + 0x80847, + 0x80a07, + 0x810c7, + 0x81607, + 0xc342, + 0x6870a, + 0x1a608, + 0x1bbfcc, + 0x12fb47, + 0x44405, + 0xc3d91, + 0x13dc6, + 0x12100a, + 0x9034a, + 0x62246, + 0xb7f4b, 0x642, - 0x31351, - 0xc5d89, - 0x9bf49, - 0x9d306, - 0x5b542, - 0x1b21ca, - 0xafcc9, - 0xb040f, - 0xb0a0e, - 0xb3108, - 0x11b08, - 0xb5c2, - 0x6ed89, - 0x1e3586c9, - 0xbd049, - 0xbd04c, - 0x8f90e, - 0x4b8c, - 0xf2f8f, - 0x1bf08e, - 0x12b40c, - 0x33449, - 0x45391, - 0x45948, - 0x1a4e12, - 0x593cd, - 0x69acd, - 0x78f8b, - 0x81855, - 0x860c9, - 0x1518ca, - 0x188809, - 0x1aad50, - 0x1ae8cb, - 0x9890f, - 0xa868b, - 0xa914c, - 0xaa110, - 0xb7dca, - 0xb894d, - 0xd3a0e, - 0x195a0a, - 0xc1e8c, - 0xc4e94, - 0xc5a11, - 0xc694b, - 0xc858f, - 0xcbb0d, - 0xcd20e, - 0xd048c, - 0xd0c8c, - 0xd370b, - 0x172a8e, - 0x199ed0, - 0xdba8b, - 0xdc74d, - 0xdf30f, - 0xe804c, - 0xe9d8e, - 0xf3651, - 0x10570c, - 0x1d4047, - 0x10d14d, - 0x11db8c, - 0x144550, - 0x16528d, - 0x16efc7, - 0x176790, - 0x19dd08, - 0x1a3e8b, - 0xba1cf, - 0x1bb208, - 0x14bf0d, - 0x1125d0, - 0x178c89, - 0x1e78b7c8, - 0x1eabf946, - 0xc0843, - 0x3ec49, - 0xc7405, - 0x6902, - 0x48c09, - 0x14c50a, - 0x1efa52c6, - 0x15a52cd, - 0x1f36a9c4, - 0x57d06, - 0x1b68a, - 0x27bcd, - 0x1f52b109, - 0x216c3, - 0x11bb8a, - 0xe6751, - 0xe6b89, - 0xe7087, - 0xe7d88, - 0xe8447, - 0x4ec48, - 0xcacb, - 0x1311c9, - 0xf1e10, - 0xf22cc, - 0x1faf270d, - 0xf3a88, - 0xf4ec5, - 0x147e08, - 0x19ce4a, - 0x18a347, - 0x2542, - 0x1ff3f5d5, - 0x13d08a, - 0x1320c9, - 0x9e588, - 0x6ab09, - 0x7cb45, - 0x11d88a, - 0x92e0f, - 0x10d54b, - 0x11ff4c, - 0x176cd2, - 0xe9c6, - 0x7ce85, - 0x117a48, - 0xf84cb, - 0xf1151, - 0x16acc7, - 0x4da0a, - 0x20300485, - 0x1b330c, - 0x139c43, - 0x197a86, - 0x408c2, - 0x1089cb, - 0x10948a, - 0x150980c, - 0x7f5c8, - 0xf6188, - 0x2069e606, - 0x17d5c7, - 0xd782, - 0x7742, - 0x1a55d0, - 0x65087, - 0x3074f, - 0x13a06, - 0xd2b8e, - 0x99a0b, - 0x3dd48, - 0x34809, - 0x5da12, - 0x197b4d, - 0x118088, - 0x1188c9, - 0xee00d, - 0x19f749, - 0xb48b, - 0x6c348, - 0x71d88, - 0x75a88, - 0x80389, - 0x8058a, - 0x84b0c, - 0x166eca, - 0xf17ca, - 0x1178c7, - 0x9a50a, - 0x1cda4d, - 0x45c51, - 0x20acd506, - 0x1b994b, - 0x12f80c, - 0x94388, - 0x149449, - 0x160b0d, - 0x68b90, - 0x1812cd, - 0x4642, - 0x4a68d, - 0x72c2, - 0x1f702, - 0x11780a, - 0x756ca, - 0x20e7b508, - 0x104cca, - 0x11f80b, - 0x10b8cc, - 0x12048a, - 0x12070f, - 0x120ace, - 0x171cd, - 0x211e2c05, - 0x12d408, - 0x3602, - 0x1422383, - 0x415505, - 0x45d884, - 0x16202c0e, - 0x16b59cce, - 0x1720180a, - 0x17b9184e, - 0x1835788e, - 0x18b7f38c, - 0x142fc47, - 0x142fc49, - 0x143a083, - 0x1926060c, - 0x19b49bc9, - 0x1a36af09, - 0x1ab71749, - 0x1742, - 0x2b51, - 0x159c11, - 0x174d, - 0x1b6451, - 0x1577d1, - 0x17f2cf, - 0x6054f, - 0x149b0c, - 0x16ae4c, - 0x17168c, - 0x1af28d, - 0x15d915, - 0xc1a8c, - 0xc778c, - 0x135a10, - 0x141acc, - 0x14af8c, - 0x18ad99, - 0x191599, - 0x1bdfd9, - 0x1cb4d4, - 0x1d6294, - 0x1e02d4, - 0x1e2714, - 0xa994, - 0x1b2c1b49, - 0x1b9e0589, - 0x1c2c7849, - 0x16645b49, - 0x1742, - 0x16e45b49, - 0x1742, - 0xa98a, - 0x1742, - 0x17645b49, - 0x1742, - 0xa98a, - 0x1742, - 0x17e45b49, - 0x1742, - 0x18645b49, - 0x1742, - 0x18e45b49, - 0x1742, - 0xa98a, - 0x1742, - 0x19645b49, - 0x1742, - 0xa98a, - 0x1742, - 0x19e45b49, - 0x1742, - 0x1a645b49, - 0x1742, - 0xa98a, - 0x1742, - 0x1ae45b49, - 0x1742, - 0xa98a, - 0x1742, - 0x1b645b49, - 0x1742, - 0x1be45b49, - 0x1742, - 0x1c645b49, - 0x1742, - 0xa98a, - 0x1742, + 
0x39c91, + 0xc5889, + 0xa0689, + 0xa12c6, + 0x5102, + 0x9c50a, + 0xb4e49, + 0xb558f, + 0xb5b8e, + 0xb7288, + 0x22a17a92, + 0x19b88, + 0x22f2fd07, + 0x1ec82, + 0x15c709, + 0x15490a, + 0x23347589, + 0x19de09, + 0x19de0c, + 0x15f4b, + 0x436ce, + 0xe6cc, + 0xf364f, + 0x1bfdce, + 0x4594c, + 0x5e789, + 0x658d1, + 0x65e88, + 0x7bd12, + 0x7cd4d, + 0x7e6cd, + 0x8564b, + 0x8b795, + 0x932c9, + 0x18500a, + 0x1b0049, + 0x1d4350, + 0x99acb, + 0x9ee0f, + 0xa3fcb, + 0xad6cc, + 0xbac90, + 0xd844a, + 0x18264d, + 0x19210e, + 0xbc48a, + 0xc090c, + 0x1997d4, + 0xc5511, + 0xca64b, + 0xccc8f, + 0xd048d, + 0xd42ce, + 0xd55cc, + 0xd5dcc, + 0xd814b, + 0x14284e, + 0x197d50, + 0x1aa38b, + 0xddacd, + 0xe730f, + 0xec90c, + 0x108b4e, + 0x10c891, + 0x18214c, + 0x11ca07, + 0x144e8d, + 0x15ffcc, + 0x1693d0, + 0x17208d, + 0x172dc7, + 0x195a10, + 0x1a5888, + 0x1abd0b, + 0xbd9cf, + 0x1bc248, + 0x68e8d, + 0x111f10, + 0x174389, + 0x237c4b88, + 0x23ac2a86, + 0xc3943, + 0x52a89, + 0x54c9, + 0xcbc45, + 0x7bc2, + 0x18fd89, + 0x62c8a, + 0x23e7c1c6, + 0x147c1cd, + 0x24363b04, + 0x1da806, + 0x2630a, + 0x2778d, + 0x246da54b, + 0x2484f809, + 0x2b203, + 0x11b88a, + 0xe9951, + 0xe9d89, + 0xeb8c7, + 0xec648, + 0xecd07, + 0x12fc08, + 0x14808b, + 0x1326c9, + 0xf2550, + 0xf2a0c, + 0x24ef31cd, + 0xf5c48, + 0xf7685, + 0x1d0608, + 0x19a8ca, + 0x16c507, + 0x1cc2, + 0x25239155, + 0x13de4a, + 0x1363c9, + 0x5688, + 0xa2549, + 0x1df185, + 0x11c70a, + 0x97c4f, + 0xa55cb, + 0x15ee8c, + 0xc8052, + 0x1b5a06, + 0x9a905, + 0x15f148, + 0xfa0cb, + 0xfa9d1, + 0x143847, + 0x5788a, + 0x25704a05, + 0x1b560c, + 0x13a843, + 0x1953c6, + 0x473c2, + 0x10ad8b, + 0x10b8ca, + 0x150bc4c, + 0xf48c8, + 0xf7c48, + 0x25a05706, + 0x1b7287, + 0x4a02, + 0x1ec2, + 0x1a6e50, + 0x67dc7, + 0x67ecf, + 0x30846, + 0x12270e, + 0x9d3cb, + 0x46c88, + 0x51f49, + 0x117052, + 0x11820d, + 0x118d88, + 0xef809, + 0x19c60d, + 0x112c9, + 0x6824b, + 0x69d88, + 0x73d08, + 0x75388, + 0x771c9, + 0x773ca, + 0x799cc, + 0x1e17ca, + 0xf14ca, + 0x1177c7, + 0xa38ca, + 0x738d, + 0x174cd1, + 0x25ed45c6, + 0x17768b, + 0xbe0c, + 0x414c8, + 0x3d609, + 0x14c6cd, + 0x57110, + 0x190c8d, + 0x6502, + 0x6540d, + 0x4a42, + 0x66342, + 0x11770a, + 0x263d034a, + 0x25c4a, + 0x26680b48, + 0x120f0a, + 0x12f4cb, + 0x3398c, + 0x1203ca, + 0x2692064f, + 0x120a0e, + 0x26de9a05, + 0x12bf48, + 0xe042, + 0x1420a83, + 0x1a38e20e, + 0x1ab2eb8e, + 0x1b202cca, + 0x1bb7c04e, + 0x1c32d84e, + 0x1cb3408c, + 0x14380c7, + 0x14380c9, + 0x1430483, + 0x1d3419cc, + 0x1db54e89, + 0x1e36d309, + 0x1eba0889, + 0x2c02, + 0x1a3511, + 0x12ead1, + 0x2c0d, + 0x17bf91, + 0x12d791, + 0x133fcf, + 0x14190f, + 0x154dcc, + 0x16d24c, + 0x1a07cc, + 0x1b764d, + 0x17d415, + 0xc510c, + 0xe140c, + 0x1439d0, + 0x14a18c, + 0x18758c, + 0x18df59, + 0x1bed19, + 0x1cabd9, + 0x1cc7d4, + 0x1d2454, + 0x1e8694, + 0x5e54, + 0xff14, + 0x1f2c51c9, + 0x1f9e8949, + 0x202e14c9, + 0x1a666089, + 0x2c02, + 0x1ae66089, + 0x2c02, + 0x5e4a, + 0x2c02, + 0x1b666089, + 0x2c02, + 0x5e4a, + 0x2c02, + 0x1be66089, + 0x2c02, + 0x1c666089, + 0x2c02, + 0x1ce66089, + 0x2c02, + 0x5e4a, + 0x2c02, + 0x1d666089, + 0x2c02, + 0x5e4a, + 0x2c02, + 0x1de66089, + 0x2c02, + 0x1e666089, + 0x2c02, + 0x5e4a, + 0x2c02, + 0x1ee66089, + 0x2c02, + 0x5e4a, + 0x2c02, + 0x1f666089, + 0x2c02, + 0x1fe66089, + 0x2c02, + 0x20666089, + 0x2c02, + 0x5e4a, + 0x2c02, 0x1400401, - 0xc945, - 0x1c0084, - 0x144ce03, - 0x1426d83, - 0x14fa443, - 0x2c0e, - 0x159cce, - 0x8450e, - 0x180a, - 0x19184e, - 0x15788e, - 0x17f38c, - 0x6060c, - 0x149bc9, - 0x16af09, - 0x171749, - 0xc1b49, - 0x1e0589, - 0xc7849, - 0x135acd, - 0x141b89, - 
0xac49, - 0x12d5c4, - 0x132ac4, - 0x1c8a04, - 0x1c95c4, - 0xaae04, - 0x2ec44, - 0x3cd84, - 0x192d44, - 0x13904, - 0xbec06, - 0x59504, - 0x158e7c3, - 0x149987, - 0x148574c, - 0x1ac3, - 0x293c2, - 0x107788, - 0xd1784, - 0x14386, - 0xd8a84, - 0x15aa06, - 0x16b82, - 0xa8c1, - 0x20e44, - 0xb1706, - 0x171c3, - 0x1ac3, - 0xa0e83, - 0x13d385, - 0x124dc2, - 0x124dc8, - 0xeb947, - 0x131247, - 0xf982, + 0x147f05, + 0x1c0dc4, + 0x8903, + 0x8502, + 0x54642, + 0x1419303, + 0x1403603, + 0x14fea83, + 0x18e20e, + 0x12eb8e, + 0x89e8e, + 0x2cca, + 0x17c04e, + 0x12d84e, + 0x13408c, + 0x1419cc, + 0x154e89, + 0x16d309, + 0x1a0889, + 0xc51c9, + 0x1e8949, + 0xe14c9, + 0x143a8d, + 0x6109, + 0x101c9, + 0x3d1c2, + 0x1cbcc4, + 0x1cec84, + 0x1d1104, + 0x1df604, + 0xaef44, + 0xacdc4, + 0x4a9c4, + 0x35644, + 0x1a704, + 0x136fc4, + 0x7b0c9, + 0x7b0cc, + 0x158286, + 0x15828e, + 0x7ce84, + 0x155cf03, + 0x14a007, + 0x148ae0c, + 0x9983, + 0x136fc4, + 0x13242, + 0xe91c8, + 0xd6b04, + 0x1e9706, + 0xdd5c4, + 0x121646, + 0x1f8c2, + 0x7281, + 0x27c44, + 0x69306, + 0x15b83, + 0x9983, + 0x71703, + 0xc7e43, + 0x14803, + 0xf7a03, + 0xc8045, + 0x5adc2, + 0x148a42, + 0x1a1e88, + 0xee7c7, + 0x132747, + 0x3242, 0x2000c2, - 0x212402, - 0x204542, - 0x20fa02, + 0x216542, + 0x2104c2, + 0x218242, 0x200382, 0x2003c2, - 0x207742, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x20e703, - 0x217fc3, - 0x23e083, - 0xae888, - 0x22ea43, - 0x233fc3, - 0x217fc3, - 0x23e083, - 0x10303, - 0x266a83, - 0xe704, + 0x201ec2, + 0x216543, + 0x222bc3, + 0x343b43, + 0x26a7c3, + 0x2296c3, + 0x20cb83, + 0x793c8, + 0x216543, + 0x222bc3, + 0x2296c3, + 0x20cb83, + 0xb303, + 0x343b43, + 0xb1b84, 0x2000c2, - 0x24ac43, - 0x2362ea43, - 0x392747, - 0x266a83, - 0x21e1c3, - 0x21e484, - 0x217fc3, - 0x23e083, - 0x226e0a, - 0x243bc5, - 0x216983, - 0x22dc42, - 0xae888, - 0x23adad8a, + 0x253c43, + 0x29216543, + 0x3a5287, + 0x343b43, + 0x21b283, + 0x21b544, + 0x2296c3, + 0x20cb83, + 0x23098a, + 0x24d905, + 0x21f6c3, + 0x213402, + 0x793c8, + 0x296df98a, 0xe01, - 0xae888, - 0x12402, - 0x137ac2, - 0x2432ae8b, - 0x2462e004, - 0x16a905, - 0x8cc5, - 0x107786, - 0x24a08cc5, - 0x54383, - 0x5cd83, + 0x793c8, + 0x16542, + 0x138402, + 0x29e4f58b, + 0x2a2093c4, + 0x163a45, + 0x1403ec5, + 0xe91c6, + 0x2a603ec5, + 0x5fa83, + 0x1b0243, 0x9c4, - 0x157bc3, - 0x2105, - 0x146bc5, - 0xae888, - 0x1a087, - 0x2ea43, - 0x2ed4d, - 0x2523a707, - 0x159146, - 0x25401645, - 0x1c0992, - 0x159207, - 0x1dbca, - 0x10ac8, - 0x1dac7, - 0x6bcca, - 0x1bc448, - 0xe4f07, - 0x1ac70f, - 0x36fc7, - 0x192b46, - 0x13c390, - 0xcee8f, - 0x21c49, - 0x57d84, - 0x259592ce, - 0x185a89, - 0x6e646, - 0x111a89, - 0x193c86, - 0x1c2e06, - 0x4f10c, - 0xbc5ca, - 0x345c7, - 0x17edca, - 0x1596c9, - 0xf8e8c, - 0x1c8ca, - 0x4b8ca, - 0x2149, - 0x57d06, - 0x3468a, - 0x118f4a, - 0xa3a4a, - 0x137509, - 0xe54c8, - 0xe5746, - 0xed88d, - 0x5130b, - 0xc7c05, - 0x25f5a28c, - 0x14ca47, - 0x110289, - 0xd1047, - 0xc6114, - 0x1129cb, - 0x10f34a, - 0x5d88a, - 0xac80d, - 0x151fa09, - 0x117e4c, - 0x1186cb, - 0x88c3, - 0x88c3, - 0x36fc6, - 0x88c3, - 0x107788, - 0x15c103, - 0x46604, - 0x54603, - 0x347c5, - 0x1475903, - 0x51709, - 0xf84cb, - 0x14e82c3, - 0x154546, - 0x15037c7, - 0x1aafc7, - 0x26d41489, - 0x17e86, - 0x4ac43, - 0xae888, - 0x12402, - 0x4d704, - 0x61083, - 0x17b845, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x23e083, - 0x233f03, - 0x22ea43, - 0x233fc3, - 0x280203, - 0x266a83, - 0x23cb03, - 0x217fc3, - 0x23e083, - 0x2bd443, - 0x20aa43, - 0x233f03, - 0x24cd44, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x23e083, - 0x204ac3, - 0x28541585, - 
0x142e6c3, - 0x22ea43, - 0x233fc3, - 0x20fa03, - 0x280203, - 0x266a83, - 0x20e704, - 0x3433c3, - 0x215f83, - 0x23cb03, - 0x217fc3, - 0x1c0443, - 0x23e083, - 0x216983, - 0x29219f03, - 0x176bc9, - 0x12402, - 0x3c7603, - 0x29e2ea43, - 0x233fc3, - 0x249283, - 0x266a83, - 0x2220c3, - 0x215f83, - 0x23e083, - 0x3005c3, - 0x3cd604, - 0xae888, - 0x2a62ea43, - 0x233fc3, - 0x2b31c3, - 0x266a83, - 0x23cb03, - 0x21e484, - 0x217fc3, - 0x23e083, - 0x2302c3, - 0xae888, - 0x2ae2ea43, - 0x233fc3, - 0x280203, - 0x205803, - 0x23e083, - 0xae888, - 0x142fc47, - 0x24ac43, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x20e704, - 0x21e484, - 0x217fc3, - 0x23e083, - 0x146bc5, - 0x178d87, - 0xc634b, - 0xe6f84, - 0xc7c05, - 0x1454408, - 0x2c10d, - 0x2c242285, - 0x27c44, - 0x12402, - 0x10103, - 0x184485, - 0x30242, - 0x53c2, - 0x34b8c5, - 0xae888, - 0x88c2, - 0x1b2c3, - 0x16b88f, - 0x12402, - 0x1063c6, + 0x12db83, + 0x10e645, + 0x146c05, + 0x793c8, + 0x22a87, + 0x16543, + 0x1b4bcd, + 0x2ae42647, + 0x1386, + 0x2b17be85, + 0x186012, + 0x1447, + 0x1e48a, + 0x17588, + 0x1e387, + 0x7368a, + 0x1bd188, + 0x110a47, + 0x165d8f, + 0x3db87, + 0x4bb86, + 0x13d150, + 0x19350f, + 0x1b009, + 0x1da884, + 0x2b40150e, + 0x5b0c9, + 0x74fc6, + 0x1113c9, + 0x190a86, + 0x6ac6, + 0xb8e4c, + 0x3ac4a, + 0x51d07, + 0x14140a, + 0x1909, + 0x25e8c, + 0x2954a, + 0x6b44a, + 0x10e689, + 0x1da806, + 0x51dca, + 0x11934a, + 0xa954a, + 0x114309, + 0xe8b88, + 0xe8e06, + 0xef08d, + 0x5b88b, + 0xcc205, + 0x2bb1e14c, + 0x13e247, + 0x10fcc9, + 0xd6187, + 0xc5c14, + 0x11230b, + 0x1dcd4a, + 0x116eca, + 0xb080d, + 0x152f6c9, + 0x117fcc, + 0x118b8b, + 0x31a03, + 0x31a03, + 0x32f46, + 0x31a03, + 0xe91c8, + 0x157243, + 0x4ebc4, + 0x5fc83, + 0x14a9607, + 0x51f05, + 0x15186c3, + 0x5c549, + 0xc8045, + 0xfa0cb, + 0x14ecb83, + 0x152e06, + 0x1523947, + 0x1d45c7, + 0x2c97ce89, + 0x1d1a86, + 0x53c43, + 0x793c8, + 0x16542, + 0x56d44, + 0x43ac3, + 0x155b45, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x20cb83, + 0x203b43, + 0x216543, + 0x222bc3, + 0x2f5503, + 0x343b43, + 0x216443, + 0x2296c3, + 0x20cb83, + 0x2a0843, + 0x201643, + 0x203b43, + 0x25dd04, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x20cb83, + 0x20e603, + 0x2192c3, + 0x213402, + 0x2e17cf85, + 0x1438003, + 0x216543, + 0x222bc3, + 0x21a003, + 0x2f5503, + 0x343b43, + 0x2b1b84, + 0x34b203, + 0x233243, + 0x216443, + 0x2296c3, + 0x7ca83, + 0x20cb83, + 0x21f6c3, + 0x2ee0fc03, + 0xc7f49, + 0x16542, + 0x225103, + 0x2fa16543, + 0x222bc3, + 0x252183, + 0x343b43, + 0x2207c3, + 0x233243, + 0x20cb83, + 0x2037c3, + 0x3df304, + 0x793c8, + 0x30216543, + 0x222bc3, + 0x2b7343, + 0x343b43, + 0x216443, + 0x21b544, + 0x2296c3, + 0x20cb83, + 0x238743, + 0x793c8, + 0x30a16543, + 0x222bc3, + 0x2f5503, + 0x20d903, + 0x20cb83, + 0x793c8, + 0x14380c7, + 0x253c43, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2b1b84, + 0x21b544, + 0x2296c3, + 0x20cb83, + 0x146c05, + 0xb46c7, + 0xc5e4b, + 0x31a3ca06, + 0x31efdb4b, + 0xea184, + 0xcc205, + 0x1472588, + 0x2f20d, + 0x1c4b88, + 0x136fc4, + 0x3264ccc5, + 0x27804, + 0x16542, + 0x1a143, + 0x158185, + 0x386c2, + 0x34aac5, + 0x793c8, + 0x33e98f0d, + 0x343a11ca, + 0x24642, + 0x5483, + 0x164f4f, + 0x18242, + 0x7ce84, + 0x136fc4, + 0x16542, 0x2000c2, - 0x24ac43, - 0x22ea43, - 0x266a83, - 0x20e704, - 0x23cb03, - 0x21e484, - 0x217fc3, - 0x23e083, - 0x216983, - 0x30242, - 0x32ff08, - 0x24cd44, - 0x37e046, - 0x3af146, - 0xae888, - 0x31a6c3, - 0x355c09, - 0x30ddd5, - 0x10dddf, - 0x22ea43, - 0x7fa87, - 0x242992, - 0x1623c6, - 0x16fd05, - 0x1d30a, - 0x168c49, - 0x24274f, - 0x2e5904, - 0x2bbf05, - 0x313850, - 
0x215f87, - 0x205803, - 0x321388, - 0x134b86, - 0x293b0a, - 0x223144, - 0x2ffec3, - 0x22dc42, - 0x2fa00b, - 0x5803, - 0x182c04, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x23cb03, - 0x217fc3, - 0x5803, - 0x23e083, - 0x307183, - 0x212402, - 0x1c06c3, - 0x2a4c4, - 0x217fc3, - 0x23e083, - 0x2fc39fc5, - 0x1d5cc6, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x23cb03, - 0x23e083, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x21e1c3, - 0x265dc3, - 0x23e083, - 0x4ac43, - 0x212402, - 0x22ea43, - 0x233fc3, - 0x217fc3, - 0x5803, - 0x23e083, - 0x17082, + 0x253c43, + 0x216543, + 0x343b43, + 0x2b1b84, + 0x216443, + 0x21b544, + 0x2296c3, + 0x20cb83, + 0x21f6c3, + 0x216543, + 0x222bc3, + 0x2296c3, + 0x20cb83, + 0x19045, + 0x331408, + 0x25dd04, + 0x379ac6, + 0x3a0686, + 0x793c8, + 0x2b6643, + 0x2f6689, + 0x21c495, + 0x1c49f, + 0x216543, + 0xf4d87, + 0x38db12, + 0x16a146, + 0x182c45, + 0x1dc8a, + 0x35349, + 0x38d8cf, + 0x2e8fc4, + 0x237a05, + 0x313790, + 0x2b1087, + 0x20d903, + 0x2c2308, + 0x13846, + 0x29fc4a, + 0x26fb04, + 0x304443, + 0x213402, + 0x2fe64b, + 0x222bc3, + 0x343b43, + 0xd903, + 0x15b044, + 0x216543, + 0x222bc3, + 0x343b43, + 0x216443, + 0x2296c3, + 0xd903, + 0x20cb83, + 0x309a43, + 0x216542, + 0x187003, + 0x149c04, + 0x2296c3, + 0x20cb83, + 0x364419c5, + 0x1de746, + 0x216543, + 0x222bc3, + 0x343b43, + 0x216443, + 0x20cb83, + 0x216543, + 0x222bc3, + 0x343b43, + 0x21b283, + 0x233c83, + 0x20cb83, + 0x53c43, + 0x216542, + 0x216543, + 0x222bc3, + 0x2296c3, + 0xd903, + 0x20cb83, + 0x19f42, 0x2000c2, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x23e083, - 0x8cc5, - 0x1ac3, - 0x24cd44, - 0x22ea43, - 0x233fc3, - 0x217544, - 0x217fc3, - 0x23e083, - 0xae888, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x1c0443, - 0x23e083, - 0x1357c9, - 0x4cc4, - 0x22ea43, - 0xf982, - 0x233fc3, - 0x280203, - 0x204903, - 0x23cb03, - 0x217fc3, - 0x5803, - 0x23e083, - 0x2a82, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x36a584, - 0x20e704, - 0x217fc3, - 0x23e083, - 0x20aa43, - 0x6c02, - 0x212402, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x1c0443, - 0x23e083, - 0xae888, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x2f4c43, - 0x160c3, - 0x1e1c3, - 0x217fc3, - 0x1c0443, - 0x23e083, - 0x32748a, - 0x345389, - 0x36500b, - 0x3657ca, - 0x36f50a, - 0x37c54b, - 0x393a4a, - 0x399a4a, - 0x3a124a, - 0x3a1c4b, - 0x3c4709, - 0x3cf9ca, - 0x3cfe0b, - 0x3db28b, - 0x3e0d8a, - 0xcdc2, - 0x22ea43, - 0x233fc3, - 0x280203, - 0x23cb03, - 0x217fc3, - 0x5803, - 0x23e083, - 0xcc4b, - 0x17fe07, - 0x5af88, - 0xee144, - 0x1c4104, - 0x94dc8, - 0xea706, - 0xcc06, - 0x1a07c9, - 0xae888, - 0x22ea43, - 0x1d304, - 0x2680c4, - 0x201c02, - 0x21e484, - 0x202645, - 0x233f03, - 0x24cd44, - 0x22ea43, - 0x236704, - 0x233fc3, - 0x24d704, - 0x2e5904, - 0x20e704, - 0x215f83, - 0x217fc3, - 0x23e083, - 0x24a845, - 0x204ac3, - 0x216983, - 0x204343, - 0x2ddf84, - 0x32a004, - 0x23a185, - 0xae888, - 0x3b4e04, - 0x3c2f86, - 0x202284, - 0x212402, - 0x3770c7, - 0x3a9947, - 0x24bb44, - 0x20e785, - 0x365485, - 0x22f845, - 0x20e704, - 0x38f948, - 0x2523c6, - 0x3641c8, - 0x2836c5, - 0x2ee705, - 0x237bc4, - 0x23e083, - 0x300ac4, - 0x37b286, - 0x243cc3, - 0x2ddf84, - 0x24fd85, - 0x248b84, - 0x2a67c4, - 0x22dc42, - 0x232ec6, - 0x3b7ec6, - 0x315fc5, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x20cb83, + 0x3ec5, + 0x63a09, + 0x9983, + 0x25dd04, + 0x216543, + 0x222bc3, + 0x28d4c4, + 0x2296c3, + 0x20cb83, + 0x793c8, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x7ca83, + 0x20cb83, + 0x1b7409, + 0xe804, + 0x216543, + 0x3242, + 0x222bc3, + 0x2f5503, + 0x20e443, + 0x216443, + 
0x2296c3, + 0xd903, + 0x20cb83, + 0x2a02, + 0x216543, + 0x222bc3, + 0x343b43, + 0x3636c4, + 0x2b1b84, + 0x2296c3, + 0x20cb83, + 0x201643, + 0x4702, + 0x216542, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x7ca83, + 0x20cb83, + 0x793c8, + 0x216543, + 0x222bc3, + 0x343b43, + 0x284103, + 0xe103, + 0x1b283, + 0x2296c3, + 0x7ca83, + 0x20cb83, + 0x38bc6, + 0x325d0a, + 0x3453c9, + 0x35fd4b, + 0x36084a, + 0x36918a, + 0x37860b, + 0x39084a, + 0x3979ca, + 0x39ecca, + 0x39ef4b, + 0x3c5589, + 0x3d368a, + 0x3d3acb, + 0x3dfecb, + 0x3e914a, + 0x4042, + 0x216543, + 0x222bc3, + 0x2f5503, + 0x216443, + 0x2296c3, + 0xd903, + 0x20cb83, + 0x3ecb, + 0x106a47, + 0x69a08, + 0x19c744, + 0x1db944, + 0x98e48, + 0xedac6, + 0x1481c6, + 0x13a09, + 0x793c8, + 0x216543, + 0x1dc84, + 0x26ff44, + 0x215d42, + 0x21b544, + 0x30eb85, + 0x203b43, + 0x25dd04, + 0x216543, + 0x23ec84, + 0x222bc3, + 0x256d44, + 0x2e8fc4, + 0x2b1b84, + 0x233243, + 0x2296c3, + 0x20cb83, + 0x2655c5, + 0x20e603, + 0x21f6c3, + 0x27d683, + 0x2d1984, + 0x323404, + 0x34bd45, + 0x793c8, + 0x32e744, + 0x3c2086, + 0x30e7c4, + 0x216542, + 0x2c8447, + 0x250707, + 0x254744, + 0x2ee845, + 0x372285, + 0x2b96c5, + 0x2b1b84, + 0x267408, + 0x25d206, + 0x392c88, + 0x287105, + 0x2efe85, + 0x257204, + 0x20cb83, + 0x305504, + 0x3770c6, + 0x24da03, + 0x2d1984, + 0x26fac5, + 0x38fd04, + 0x2aacc4, + 0x213402, + 0x38f846, + 0x3b8fc6, + 0x315f85, 0x2000c2, - 0x24ac43, - 0x34e12402, - 0x21fa44, + 0x253c43, + 0xedc46, + 0x3b616542, + 0x231d44, + 0x63dc5, 0x200382, - 0x23cb03, - 0x20cac2, - 0x217fc3, + 0x216443, + 0x2a9542, + 0x2296c3, 0x2003c2, - 0x2fcf46, - 0x208503, - 0x20aa43, - 0xae888, - 0xae888, - 0x266a83, - 0x1c0443, + 0x301a46, + 0x202b03, + 0x1da785, + 0x201643, + 0x793c8, + 0x793c8, + 0x343b43, + 0x7ca83, 0x2000c2, - 0x35a12402, - 0x266a83, - 0x26e2c3, - 0x3433c3, - 0x22e004, - 0x217fc3, - 0x23e083, - 0xae888, + 0x3c216542, + 0x343b43, + 0x274c43, + 0x34b203, + 0x2093c4, + 0x2296c3, + 0x20cb83, + 0x793c8, 0x2000c2, - 0x36212402, - 0x22ea43, - 0x217fc3, - 0x5803, - 0x23e083, + 0x3ca16542, + 0x216543, + 0x2296c3, + 0xd903, + 0x20cb83, 0x682, - 0x203b42, - 0x21fcc2, - 0x21e1c3, - 0x2f8e43, + 0x2091c2, + 0x22a042, + 0x21b283, + 0x2faf43, 0x2000c2, - 0x146bc5, - 0xae888, - 0x178d87, - 0x212402, - 0x233fc3, - 0x24d704, - 0x2033c3, - 0x266a83, - 0x204903, - 0x23cb03, - 0x217fc3, - 0x213cc3, - 0x23e083, - 0x234fc3, - 0x140d13, - 0x142dd4, - 0x146bc5, - 0x178d87, - 0x1dbc9, - 0x110b86, - 0x121b4b, - 0x36fc6, - 0x54bc7, - 0xe786, + 0x146c05, + 0x793c8, + 0xb46c7, + 0x216542, + 0x222bc3, + 0x256d44, + 0x204f03, + 0x343b43, + 0x20e443, + 0x216443, + 0x2296c3, + 0x20b243, + 0x20cb83, + 0x23d343, + 0x1643, + 0x13ff13, + 0x142f14, + 0x146c05, + 0xb46c7, + 0x1e489, + 0x1e1e06, + 0x19108b, + 0x32f46, + 0x60ac7, + 0x145246, 0x649, - 0x1d818a, - 0x9110d, - 0x127d8c, - 0x1198ca, - 0x15d048, - 0x1b5a05, - 0x1dc08, - 0x13a06, - 0x1ce786, - 0x134c46, + 0x15d3ca, + 0x9560d, + 0x1b60cc, + 0x119cca, + 0x46688, + 0x10f7c5, + 0x1e4c8, + 0x30846, + 0x1d1806, + 0x13906, 0x602, - 0x2293c2, - 0x6f204, - 0xa0e86, - 0x1411d0, - 0x147a54e, - 0x1e46, - 0x696cc, - 0x37b22f0b, - 0x146bc5, - 0x15434b, - 0x37fce6c4, - 0x1c0247, - 0x23c91, - 0x11a7ca, - 0x22ea43, - 0x38285648, - 0x6bc45, - 0xf988, - 0x1ff44, - 0x14c705, - 0x38561cc6, - 0x9b306, - 0xc9b46, - 0x9620a, - 0x96ecc, - 0x1c2043, - 0x1c4104, - 0x38a120c4, - 0x51709, - 0x164347, - 0x1167ca, - 0x14dac89, + 0x213242, + 0x15cb84, + 0x1d4b06, + 0x1255d0, + 0x14dbf0e, + 0x1a1146, + 0x41dcc, + 0x3e37cd0b, + 0x146c05, + 0x152c0b, + 0x3e7d1744, + 
0x1c0f87, + 0x2c191, + 0x12140a, + 0x216543, + 0x3ea8ad08, + 0x73605, + 0x89288, + 0x2a2c4, + 0x62e85, + 0x3ec0b186, + 0x1bc60b, + 0xc3d86, + 0x72206, + 0x9a6ca, + 0x16c5cc, + 0x1c2003, + 0x1db944, + 0x3f218004, + 0x5c549, + 0x192e07, + 0xac00a, + 0x14df889, 0x605, - 0x103583, - 0x38e35107, - 0x2a7c5, - 0x153d986, - 0x14731c6, - 0xb3f8c, - 0x104248, - 0x390408c3, - 0xfa24b, - 0x12bd4b, - 0x3964950c, - 0x140ba83, - 0xc96c8, - 0xfa4c5, - 0xc6c09, - 0xeca43, - 0x11fb08, - 0x141b5c6, - 0x8e8c7, - 0x39b60b09, - 0x99c87, - 0xf054a, - 0x3afc6788, - 0x11838d, - 0xff48, - 0x1ac3, - 0x1445009, - 0x3a643, - 0x36fc6, - 0x107788, - 0x13904, - 0x154c85, - 0x1492ec3, - 0x22387, - 0x39e22383, - 0x3a3c78c6, - 0x3a637e84, - 0x3ab09647, - 0x107784, - 0x107784, - 0x107784, - 0x107784, + 0xb6803, + 0x3f63d487, + 0x149f05, + 0x1565b86, + 0x157ac46, + 0x3fb92f4f, + 0xb800c, + 0x107588, + 0x3fc473c3, + 0x10a3c4, + 0xfe88b, + 0x1d694b, + 0x4025240c, + 0x14110c3, + 0xcddc8, + 0xfeb05, + 0xca909, + 0xeb643, + 0x12f7c8, + 0x1426246, + 0x95c87, + 0x4074c6c9, + 0x41a7a6c8, + 0x9dc07, + 0xcf9ca, + 0x41fc9408, + 0x11884d, + 0x12248, + 0x9983, + 0x146a249, + 0x14c203, + 0x32f46, + 0xe91c8, + 0x1a704, + 0x1d8645, + 0xfea83, + 0x1497d03, + 0x20a87, + 0x40a20a83, + 0x40fc2486, + 0x41240644, + 0x4170ba87, + 0xe91c4, + 0xe91c4, + 0xe91c4, + 0xe91c4, + 0x3ec5, + 0x1a18c8, + 0x148209, 0x41, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x23cb03, - 0x217fc3, - 0x23e083, + 0x216543, + 0x222bc3, + 0x343b43, + 0x216443, + 0x2296c3, + 0x20cb83, 0x2000c2, - 0x212402, - 0x266a83, - 0x209582, - 0x217fc3, - 0x23e083, - 0x208503, - 0x38644f, - 0x38680e, - 0xae888, - 0x22ea43, - 0x44cc7, - 0x233fc3, - 0x266a83, - 0x2191c3, - 0x217fc3, - 0x23e083, - 0x1d84, - 0x157d04, - 0x1b4744, - 0x21afc3, - 0x324007, - 0x207d42, - 0x272549, + 0x216542, + 0x343b43, + 0x2042c2, + 0x2296c3, + 0x20cb83, + 0x202b03, + 0x3835cf, + 0x38398e, + 0x793c8, + 0x216543, + 0x4e6c7, + 0x222bc3, + 0x343b43, + 0x243543, + 0x2296c3, + 0x20cb83, + 0x1a1084, + 0x12dcc4, + 0x9c04, + 0x224503, + 0x3a2347, + 0x202302, + 0x277c09, 0x200ac2, - 0x3a58cb, - 0x2a6b8a, - 0x2aec89, + 0x3a714b, + 0x2e808a, + 0x2ec409, 0x200542, - 0x220306, - 0x244495, - 0x3a5a15, - 0x387d93, - 0x3a5f93, - 0x2272c2, - 0x2272c5, - 0x25f44c, - 0x27ad0b, - 0x277a05, - 0x201802, - 0x239202, - 0x381b06, - 0x203502, - 0x2cf9c6, - 0x21d58d, - 0x22a54c, - 0x38b884, + 0x22a686, + 0x256255, + 0x3a7295, + 0x259fd3, + 0x3a7813, + 0x22d0c2, + 0x22d0c5, + 0x363e0c, + 0x27ff8b, + 0x281405, + 0x202cc2, + 0x30a8c2, + 0x37ed06, + 0x203b42, + 0x2d4b06, + 0x21df0d, + 0x349c8c, + 0x3c4c44, 0x200882, - 0x222b02, - 0x3a51c8, + 0x205242, + 0x27c0c8, 0x200202, - 0x336d46, - 0x39c70f, - 0x357dd0, - 0x229804, - 0x244655, - 0x387f13, - 0x24c943, - 0x369f8a, - 0x20c5c7, - 0x3a1ec9, - 0x316687, - 0x30bf02, + 0x32dd86, + 0x39a18f, + 0x32dd90, + 0x3abc44, + 0x256415, + 0x25a153, + 0x20a883, + 0x3630ca, + 0x211d47, + 0x39f1c9, + 0x3129c7, + 0x328c42, 0x200282, - 0x3c90c6, - 0x204cc2, - 0xae888, - 0x207f42, - 0x208a02, - 0x228fc7, - 0x348187, - 0x348191, - 0x218885, - 0x21888e, - 0x2194cf, - 0x20cfc2, - 0x3236c7, - 0x21b008, - 0x20aac2, - 0x21c942, - 0x227846, - 0x22784f, - 0x26c690, - 0x22c442, - 0x20cf02, - 0x238b48, - 0x214803, - 0x261248, - 0x2eea8d, - 0x20cf03, - 0x3cc248, - 0x28734f, - 0x28770e, - 0x25d54a, - 0x26cb11, - 0x26cf90, - 0x30280d, - 0x302b4c, - 0x3c20c7, - 0x36a107, - 0x37e109, - 0x29a842, - 0x205082, - 0x256b8c, - 0x256e8b, + 0x3cb7c6, + 0x207dc2, + 0x793c8, + 0x202502, + 0x20bd42, + 0x212e47, + 0x38d507, + 0x38d511, + 
0x221a05, + 0x221a0e, + 0x22204f, + 0x204242, + 0x2f1c07, + 0x224b48, + 0x2016c2, + 0x2295c2, + 0x227406, + 0x22740f, + 0x23ff50, + 0x237242, + 0x204182, + 0x336588, + 0x210903, + 0x2920c8, + 0x2cb30d, + 0x204183, + 0x3a04c8, + 0x28e48f, + 0x28e84e, + 0x316b8a, + 0x3b3ed1, + 0x3b4350, + 0x21b9cd, + 0x21bd0c, + 0x386987, + 0x363247, + 0x379b89, + 0x20cd02, + 0x202402, + 0x26274c, + 0x262a4b, 0x200d42, - 0x2d38c6, - 0x202282, + 0x2d8306, + 0x20c602, 0x200482, - 0x361e82, - 0x212402, - 0x22f244, - 0x239d87, - 0x22c982, - 0x240307, - 0x241b47, - 0x230a82, - 0x211d02, - 0x244b85, - 0x20da02, - 0x3985ce, - 0x3d068d, - 0x233fc3, - 0x28cf0e, - 0x2bb64d, - 0x35cc43, - 0x203142, - 0x28ac84, - 0x29a802, - 0x223ec2, - 0x3930c5, - 0x3a3b07, - 0x2481c2, - 0x20fa02, - 0x24d307, - 0x251a88, - 0x2ba882, - 0x27cf06, - 0x256a0c, - 0x256d4b, - 0x2091c2, - 0x261d4f, - 0x262110, - 0x26250f, - 0x2628d5, - 0x262e14, - 0x26330e, - 0x26368e, - 0x263a0f, - 0x263dce, - 0x264154, - 0x264653, - 0x264b0d, - 0x27c349, - 0x292a43, - 0x2033c2, - 0x2d2685, - 0x2033c6, + 0x22e702, + 0x216542, + 0x3dbec4, + 0x241787, + 0x213b02, + 0x248d87, + 0x24a7c7, + 0x215a42, + 0x219d82, + 0x24e585, + 0x20c542, + 0x250e0e, + 0x39654d, + 0x222bc3, + 0x243c8e, + 0x2c764d, + 0x3c4143, + 0x2026c2, + 0x2730c4, + 0x2b3802, + 0x213642, + 0x3a6a05, + 0x3acc07, + 0x250d02, + 0x218242, + 0x256947, + 0x25c8c8, + 0x25c082, + 0x29a986, + 0x2625cc, + 0x26290b, + 0x20eb02, + 0x26c34f, + 0x26c710, + 0x26cb0f, + 0x26ced5, + 0x26d414, + 0x26d90e, + 0x26dc8e, + 0x26e00f, + 0x26e3ce, + 0x26e754, + 0x26ec53, + 0x26f10d, + 0x2817c9, + 0x2975c3, + 0x204342, + 0x322205, + 0x204f06, 0x200382, - 0x3451c7, - 0x266a83, + 0x2bfd47, + 0x343b43, 0x200642, - 0x23e108, - 0x26cd51, - 0x26d190, - 0x202182, - 0x291c47, - 0x204b82, - 0x277507, - 0x206902, - 0x207089, - 0x381ac7, - 0x294648, - 0x361b06, - 0x207483, - 0x207485, - 0x234242, + 0x23e448, + 0x3b4111, + 0x3b4550, + 0x202102, + 0x296907, + 0x202642, + 0x25c247, + 0x207bc2, + 0x208309, + 0x37ecc7, + 0x3e5848, + 0x20afc6, + 0x208703, + 0x208705, + 0x225e82, 0x2004c2, - 0x3c94c5, - 0x3b3785, - 0x201482, - 0x219303, - 0x3546c7, - 0x20bdc7, - 0x204d02, - 0x249084, - 0x20eb03, - 0x2f6f89, - 0x20eb08, - 0x202702, - 0x20a682, - 0x26b947, - 0x26ca45, - 0x273508, - 0x2b1347, - 0x209f03, - 0x2a0d06, - 0x30268d, - 0x302a0c, - 0x305e06, - 0x206b02, - 0x208c82, - 0x20b982, - 0x2871cf, - 0x2875ce, - 0x365507, - 0x204482, - 0x388c05, - 0x388c06, - 0x215782, + 0x3cbbc5, + 0x36bd85, + 0x20b402, + 0x237843, + 0x352f87, + 0x3c3c47, + 0x203f02, + 0x38f0c4, + 0x271f43, + 0x33eec9, + 0x3c7348, + 0x209d42, + 0x210442, + 0x22cac7, + 0x231745, + 0x20c708, + 0x327287, + 0x20f543, + 0x3d4986, + 0x21b84d, + 0x21bbcc, + 0x223046, + 0x204202, + 0x31de42, + 0x201582, + 0x28e30f, + 0x28e70e, + 0x372307, + 0x202042, + 0x3d2185, + 0x3d2186, + 0x228882, 0x200bc2, - 0x293506, - 0x206583, - 0x206586, - 0x2d8a45, - 0x2d8a4d, - 0x2d92d5, - 0x2da14c, - 0x2da4cd, - 0x2da812, - 0x20a942, - 0x2720c2, - 0x203882, - 0x36ac46, - 0x204a46, - 0x202542, - 0x203446, + 0x298346, + 0x210783, + 0x3c2c46, + 0x2dd585, + 0x2dd58d, + 0x2de195, + 0x2ded4c, + 0x2df0cd, + 0x2df412, + 0x203742, + 0x277782, + 0x202802, + 0x3437c6, + 0x20e586, + 0x43296084, + 0x201cc2, + 0x204f86, 0x201102, - 0x324805, - 0x202582, - 0x398709, - 0x22ce4c, - 0x22d18b, + 0x3a2b45, + 0x205c02, + 0x250f49, + 0x22d58c, + 0x22d8cb, 0x2003c2, - 0x252e48, - 0x202a42, + 0x25e3c8, + 0x211c02, 0x200a82, - 0x278706, - 0x245ac5, + 0x27de46, + 0x266005, 0x200a87, - 0x22dcc5, - 0x257e45, - 0x201b42, - 
0x21dcc2, - 0x205b42, - 0x298c07, - 0x2fd00d, - 0x2fd38c, - 0x235507, - 0x27ce82, - 0x211c82, - 0x3dc788, - 0x248d88, - 0x34f348, - 0x3bb1c4, - 0x372d07, - 0x36aa43, - 0x22d782, - 0x204ac2, - 0x2fe3c9, - 0x30b287, - 0x216982, - 0x278b05, - 0x242c42, - 0x20d402, - 0x2f8b83, - 0x2f8b86, - 0x306d42, - 0x308142, + 0x2fca45, + 0x2824c5, + 0x23d7c2, + 0x21e582, + 0x208402, + 0x29f107, + 0x301b0d, + 0x301e8c, + 0x25d707, + 0x29a902, + 0x219d02, + 0x3e8008, + 0x38ff08, + 0x2e5e08, + 0x3bc204, + 0x342ac7, + 0x363b83, + 0x206bc2, + 0x203482, + 0x302889, + 0x233347, + 0x2037c2, + 0x27e245, + 0x24cec2, + 0x204682, + 0x30b0c3, + 0x30b0c6, + 0x309602, + 0x30a282, 0x200402, - 0x3616c6, - 0x34de07, - 0x216782, + 0x2abc86, + 0x273007, + 0x213582, 0x200902, - 0x26108f, - 0x28cd4d, - 0x28fd0e, - 0x2bb4cc, - 0x208842, - 0x205302, - 0x361945, - 0x325d86, + 0x291f0f, + 0x243acd, + 0x39e2ce, + 0x2c74cc, + 0x20cbc2, + 0x202a82, + 0x20ae05, + 0x324106, 0x200b82, - 0x205502, + 0x205d42, 0x200682, - 0x28d0c4, - 0x2c14c4, - 0x389fc6, - 0x207742, - 0x28d807, - 0x23c643, - 0x23c648, - 0x23d1c8, - 0x245207, - 0x249946, - 0x20ab02, - 0x2186c3, - 0x2186c7, - 0x292246, - 0x2ecb85, - 0x27a1c8, - 0x2018c2, - 0x3c1007, - 0x207142, - 0x25cdc2, - 0x201702, - 0x219649, - 0x203c02, - 0x10acc8, - 0x201f42, - 0x235783, - 0x3599c7, + 0x243e44, + 0x2c4b44, + 0x36c186, + 0x201ec2, + 0x292d47, + 0x23f643, + 0x23f648, + 0x2408c8, + 0x24ad47, + 0x251646, + 0x204ac2, + 0x2118c3, + 0x2118c7, + 0x28a6c6, + 0x2ed245, + 0x27f908, + 0x202d82, + 0x35b4c7, + 0x203782, + 0x352902, + 0x204102, + 0x2221c9, + 0x24b302, + 0x14448, + 0x201b82, + 0x25d983, + 0x32e887, 0x200f02, - 0x22cfcc, - 0x22d2cb, - 0x305e86, - 0x3034c5, - 0x203d02, - 0x202a82, - 0x2cb146, - 0x20dd03, - 0x36a307, - 0x2b3f42, + 0x22d70c, + 0x22da0b, + 0x2abf06, + 0x223e85, + 0x43609d83, + 0x22bdc2, + 0x202a02, + 0x2cf7c6, + 0x209003, + 0x363447, + 0x211482, 0x2008c2, - 0x244315, - 0x3a5bd5, - 0x387c53, - 0x3a6113, - 0x2596c7, - 0x28b111, - 0x2908d0, - 0x2f7b92, - 0x29b711, - 0x2a0548, - 0x2a0550, - 0x2a2c8f, - 0x2a6953, - 0x2aea52, - 0x2b8190, - 0x36f14f, - 0x3a4112, - 0x2bac51, - 0x2bfa93, - 0x3426d2, - 0x2d868f, - 0x2e010e, - 0x2e3512, - 0x2e43d1, - 0x2e79cf, - 0x2ea38e, - 0x2ed451, - 0x2fa9d0, - 0x304412, - 0x307211, - 0x309090, - 0x321ecf, - 0x37ab11, - 0x3d2fd0, - 0x33fac6, - 0x314b47, - 0x2153c7, - 0x202402, - 0x288985, - 0x3135c7, - 0x21fcc2, - 0x208d82, - 0x22b8c5, - 0x208743, - 0x26ec86, - 0x2fd1cd, - 0x2fd50c, - 0x2034c2, - 0x25f2cb, - 0x27abca, - 0x22718a, - 0x2ca549, - 0x2fc34b, - 0x2b148d, - 0x313ccc, - 0x240cca, - 0x2466cc, - 0x24e88b, - 0x27784c, - 0x27bd0e, - 0x29cb4b, - 0x2b668c, - 0x2ec543, - 0x2edf06, - 0x3c6782, - 0x305102, - 0x25cb43, - 0x201502, - 0x204243, - 0x353446, - 0x262a87, - 0x2c3846, - 0x2158c8, - 0x354548, - 0x3800c6, - 0x20e482, - 0x31598d, - 0x315ccc, - 0x32bf07, - 0x319707, - 0x223542, - 0x216b82, - 0x203b02, - 0x284302, - 0x336c56, - 0x33b795, - 0x3407d6, - 0x3437d3, - 0x343e92, - 0x35bc93, - 0x35de52, - 0x3b6bcf, - 0x3c5758, - 0x3c6257, - 0x3c6c59, - 0x3c8b18, - 0x3c96d8, - 0x3cb9d7, - 0x3cc457, - 0x3ce196, - 0x3d1cd3, - 0x3d2755, - 0x3d33d2, - 0x3d3853, - 0x212402, - 0x217fc3, - 0x23e083, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x23cb03, - 0x21e484, - 0x217fc3, - 0x23e083, - 0x208503, + 0x2560d5, + 0x3a7455, + 0x259e93, + 0x3a7993, + 0x279587, + 0x294d91, + 0x2f9010, + 0x2a27d2, + 0x2a7411, + 0x2aae48, + 0x2aae50, + 0x372f4f, + 0x3a5e53, + 0x3abf92, + 0x2c2690, + 0x2bec4f, + 0x2c2bd2, + 0x2c4191, + 0x2d73d3, + 0x2dd112, + 0x2e3d4f, + 0x2e664e, + 
0x2e7e92, + 0x2ec211, + 0x2ed70f, + 0x2f58ce, + 0x2f7251, + 0x2f8450, + 0x2ff012, + 0x307751, + 0x309ad0, + 0x30b50f, + 0x366211, + 0x355510, + 0x37f0c6, + 0x31e787, + 0x234ac7, + 0x201c02, + 0x28ce85, + 0x313507, + 0x22a042, + 0x203f82, + 0x3d0545, + 0x228183, + 0x35c606, + 0x301ccd, + 0x30200c, + 0x205002, + 0x363c8b, + 0x27fe4a, + 0x22cf8a, + 0x2ceb09, + 0x300a8b, + 0x3273cd, + 0x313c0c, + 0x2477ca, + 0x24ec8c, + 0x27aa4b, + 0x28124c, + 0x28418e, + 0x2a0b0b, + 0x2eafcc, + 0x2f3103, + 0x2ef706, + 0x3c9402, + 0x308542, + 0x2651c3, + 0x2017c2, + 0x23d203, + 0x351d46, + 0x26d087, + 0x2e2846, + 0x3aad48, + 0x352e08, + 0x306d06, + 0x208e42, + 0x31594d, + 0x315c8c, + 0x3d6b07, + 0x319b07, + 0x221e02, + 0x21f8c2, + 0x211842, + 0x289c82, + 0x3377d6, + 0x33c555, + 0x33f9d6, + 0x344113, + 0x3447d2, + 0x356dd3, + 0x357512, + 0x3b7ccf, + 0x3c6b58, + 0x3c8ed7, + 0x3c98d9, + 0x3cb218, + 0x3cbdd8, + 0x3cccd7, + 0x3ced97, + 0x3d1216, + 0x3d6013, + 0x3d6f55, + 0x3d77d2, + 0x3d7c53, + 0x30182, + 0x43a13a04, + 0x43fc4b88, + 0x3ec5, + 0x216542, + 0x2296c3, + 0x386c2, + 0x20cb83, + 0x216543, + 0x222bc3, + 0x343b43, + 0x216443, + 0x21b544, + 0x2296c3, + 0x20cb83, + 0x202b03, 0x2000c2, - 0x202642, - 0x3ce98545, - 0x3d25ef05, - 0x3d73ed86, - 0xae888, - 0x3dac0105, - 0x212402, - 0x204542, - 0x3de5de45, - 0x3e285fc5, - 0x3e687a87, - 0x3ea87dc9, - 0x3ef4da84, + 0x2070c2, + 0x44e9bcc5, + 0x4529b285, + 0x4567ad86, + 0x793c8, + 0x45ac3205, + 0x216542, + 0x2104c2, + 0x45f336c5, + 0x4628b685, + 0x4668c587, + 0x46a93f89, + 0x46e1eb44, 0x200382, 0x200642, - 0x3f25bf05, - 0x3f69e949, - 0x3fb36248, - 0x3feb87c5, - 0x403513c7, - 0x40623708, - 0x40b08c85, - 0x40e9f486, - 0x413a9a89, - 0x416dd6c8, - 0x41ad02c8, - 0x41e9ef8a, - 0x422ef084, - 0x426ad705, - 0x42acc788, - 0x42e48985, - 0x214882, - 0x4324bd03, - 0x436abe06, - 0x43a6af08, - 0x43ef4246, - 0x4434df48, - 0x447af006, - 0x44a463c4, - 0x44e03182, - 0x45707b87, - 0x45ab43c4, - 0x45e81487, - 0x463da087, + 0x4725a945, + 0x4769b3c9, + 0x47b36dc8, + 0x47ebb2c5, + 0x4834ff07, + 0x4861cf88, + 0x48b18f85, + 0x48e21486, + 0x4924b649, + 0x496f9ec8, + 0x49ad5408, + 0x49ea4e8a, + 0x4a387144, + 0x4a6b2605, + 0x4aad1108, + 0x4ae876c5, + 0x21ab82, + 0x4b2e3303, + 0x4b6aff46, + 0x4bba9148, + 0x4bf53fc6, + 0x4c273148, + 0x4c7da086, + 0x4ca4fb84, + 0x4ce04cc2, + 0x4d6e2c47, + 0x4dab7d44, + 0x4de85287, + 0x4e3e3047, 0x2003c2, - 0x466a3e85, - 0x46a15cc4, - 0x46faaa07, - 0x4723c0c7, - 0x4768aac6, - 0x47a86b45, - 0x47e9ea47, - 0x482dd548, - 0x487da407, - 0x48adcb49, - 0x48ed9845, - 0x4931d047, - 0x49697b86, - 0x27c4b, - 0x49b47b08, - 0x22800d, - 0x25c089, - 0x279d4b, - 0x27b8cb, - 0x2afecb, - 0x39b08b, + 0x4e6a8485, + 0x4ea7fa84, + 0x4efafd07, + 0x4f23cc07, + 0x4f690046, + 0x4fa8c145, + 0x4fea2f07, + 0x502cdf88, + 0x507e33c7, + 0x50abb909, + 0x50ee3405, + 0x5131f287, + 0x5169b0c6, + 0x2780b, + 0x51a2e2c8, + 0x230c4d, + 0x271d09, + 0x27f48b, + 0x29868b, + 0x2b744b, + 0x2d24cb, + 0x32430b, + 0x3245cb, + 0x324a89, 0x325f8b, 0x32624b, - 0x326709, - 0x32770b, - 0x3279cb, - 0x32850b, - 0x32910a, - 0x32964a, - 0x329c4c, - 0x32e6cb, - 0x32ec0a, - 0x34228a, - 0x34d34e, - 0x34e94e, - 0x34ecca, - 0x350b0a, - 0x351b4b, - 0x351e0b, - 0x35290b, - 0x372ecb, - 0x3734ca, - 0x37418b, - 0x37444a, - 0x3746ca, - 0x37494a, - 0x394a0b, - 0x39bbcb, - 0x39ed4e, - 0x39f0cb, - 0x3a65cb, - 0x3a73cb, - 0x3ab74a, - 0x3ab9c9, - 0x3abc0a, - 0x3ad9ca, - 0x3c514b, - 0x3d00cb, - 0x3d0aca, - 0x3d170b, - 0x3d7a4b, - 0x3e07cb, - 0x49e89188, - 0x4a290209, - 0x4a6a7249, - 0x4aaefcc8, - 0x35f145, - 0x204083, - 0x251f44, - 0x34e385, 
- 0x34d7c6, - 0x367645, - 0x28f384, - 0x3450c8, - 0x31f645, - 0x299784, - 0x203787, - 0x2a634a, - 0x37738a, - 0x365607, - 0x26b0c7, - 0x2e7ec7, - 0x288047, - 0x33a405, - 0x20e506, - 0x2f34c7, - 0x20fd84, - 0x3ba146, - 0x3ba046, - 0x3dccc5, - 0x389dc4, - 0x29ffc6, - 0x2a5407, - 0x2671c6, - 0x31a487, - 0x235e43, - 0x3a2246, - 0x238d85, - 0x287b87, - 0x26fe0a, - 0x237784, - 0x2219c8, - 0x39a2c9, - 0x2d6b87, - 0x3bba06, - 0x203f48, - 0x2f4989, - 0x3a2084, - 0x2d2a04, - 0x313005, - 0x21e388, - 0x2d6e47, - 0x2b7689, - 0x3690c8, - 0x31b8c6, - 0x266cc6, - 0x2a0b88, - 0x371c86, - 0x25ef05, - 0x28ab86, - 0x281f48, - 0x2870c6, - 0x255f0b, - 0x2be206, - 0x2a280d, - 0x205385, - 0x2b4286, - 0x21f585, - 0x2bc949, - 0x2e0cc7, - 0x3cd248, - 0x39dec6, - 0x2a1949, - 0x2c1246, - 0x26fd85, - 0x2a9606, - 0x2d5506, - 0x2db549, - 0x2c8186, - 0x2a6047, - 0x2d5bc5, - 0x208a43, - 0x22d805, - 0x395c07, - 0x25fac6, - 0x205289, - 0x33ed86, - 0x281686, - 0x226049, - 0x28a589, - 0x2aa947, - 0x207648, - 0x29b149, - 0x288608, - 0x3a7646, - 0x2e5285, - 0x27dd4a, - 0x281706, - 0x347446, - 0x2deb05, - 0x253708, - 0x2f5707, - 0x23114a, - 0x24df06, - 0x2e2785, - 0x3086c6, - 0x20d647, - 0x3bb8c7, - 0x21a3c5, - 0x26ff45, - 0x26c506, - 0x273b06, - 0x2b0d46, - 0x2ccc44, - 0x289b09, - 0x291a06, - 0x306f0a, - 0x30c148, - 0x31cd48, - 0x37738a, - 0x2ef805, - 0x2a5345, - 0x3cac88, - 0x2c7e88, - 0x2398c7, - 0x36ee86, - 0x339788, - 0x20ee87, - 0x27a408, - 0x2c6806, - 0x28bac8, - 0x29de06, - 0x283847, - 0x23b3c6, - 0x29ffc6, - 0x27438a, - 0x305f86, - 0x2e5289, - 0x2a7746, - 0x22910a, - 0x2463c9, - 0x2fd9c6, - 0x2c9144, - 0x2d274d, - 0x285e07, - 0x3325c6, - 0x2d0185, - 0x2c12c5, - 0x396906, - 0x2a9b89, - 0x2c09c7, - 0x282946, - 0x2ced06, - 0x28f409, - 0x288d84, - 0x23f644, - 0x3b53c8, - 0x237ac6, - 0x2a9708, - 0x322708, - 0x3a9f87, - 0x358b89, - 0x3c9f87, - 0x2bffca, - 0x2fee8f, - 0x2b230a, - 0x3e22c5, - 0x282185, - 0x21c3c5, - 0x229747, - 0x20d203, - 0x207848, - 0x355606, - 0x355709, - 0x2f3dc6, - 0x2db387, - 0x2a1709, - 0x3cd148, - 0x2debc7, - 0x325343, - 0x35f1c5, - 0x20d185, - 0x2cca8b, - 0x248a44, - 0x238344, - 0x27d506, - 0x325507, - 0x396e8a, - 0x24bd87, - 0x298787, - 0x285fc5, - 0x3d5c05, - 0x296ac9, - 0x29ffc6, - 0x24bc0d, - 0x273445, - 0x2c3c03, - 0x2059c3, - 0x3617c5, - 0x33a085, - 0x203f48, - 0x283287, - 0x23f3c6, - 0x2a6ec6, - 0x22bbc5, - 0x234287, - 0x25eb47, - 0x252287, - 0x2ad78a, - 0x3a2308, - 0x2ccc44, - 0x286e47, + 0x3283cb, + 0x328fca, + 0x32950a, + 0x329b0c, + 0x32fecb, + 0x33040a, + 0x34218a, + 0x34c2ce, + 0x34d44e, + 0x34d7ca, + 0x34f78a, + 0x35044b, + 0x35070b, + 0x35120b, + 0x36e7cb, + 0x36edca, + 0x36fa8b, + 0x36fd4a, + 0x36ffca, + 0x37024a, + 0x391a8b, + 0x39944b, + 0x39bc0e, + 0x39bf8b, + 0x3a7e4b, + 0x3a9ecb, + 0x3ada8a, + 0x3add09, + 0x3adf4a, + 0x3afa0a, + 0x3c654b, + 0x3d3d8b, + 0x3d4f4a, + 0x3d5a4b, + 0x3dbc4b, + 0x3e8b8b, + 0x51e8d908, + 0x522946c9, + 0x526ab689, + 0x52af0788, + 0x359645, + 0x20dec3, + 0x25cd84, + 0x2cbb05, + 0x21e886, + 0x221205, + 0x293a44, + 0x2bfc48, + 0x31fc85, + 0x29d144, + 0x20d007, + 0x2aa84a, + 0x24104a, + 0x372407, + 0x3a9307, + 0x2ec787, + 0x291247, + 0x313305, + 0x219686, + 0x372c47, + 0x35a804, + 0x2c9046, + 0x3dc846, + 0x203345, + 0x333144, + 0x2a80c6, + 0x2a9a07, + 0x22fa86, + 0x2b6407, + 0x23e343, + 0x39f546, + 0x3367c5, + 0x28c687, + 0x27588a, + 0x23e544, + 0x21ad88, + 0x2ba709, + 0x2c9607, + 0x3c6006, + 0x267608, + 0x37ae89, + 0x39f384, + 0x322584, + 0x30c2c5, + 0x21b448, + 0x2dbb47, + 0x30a449, + 0x3d8f48, + 0x31b5c6, + 0x361b46, + 0x2a5ac8, + 0x36d846, + 0x29b285, + 
0x290106, + 0x285988, + 0x28e206, + 0x261acb, + 0x38a106, + 0x2a6f8d, + 0x20d485, + 0x2b7c06, + 0x21d045, + 0x3c8609, + 0x2e4747, + 0x3d2008, + 0x3c4f86, + 0x2a6209, + 0x2c48c6, + 0x275805, + 0x216b86, + 0x2d7cc6, + 0x2e02c9, + 0x2cc886, + 0x31d5c7, + 0x2dec05, + 0x20c043, + 0x261c45, + 0x2bc687, + 0x3640c6, + 0x20d389, + 0x27ad86, + 0x285486, + 0x226a89, + 0x28fb09, + 0x2aea87, + 0x201dc8, + 0x29f949, + 0x28cb08, + 0x3e5e06, + 0x2e8945, + 0x2834ca, + 0x285506, + 0x3cfc86, + 0x2e2b85, + 0x25ec08, + 0x358d07, + 0x239a8a, + 0x257686, + 0x303885, + 0x30aa86, + 0x2048c7, + 0x3c5ec7, + 0x2ac545, + 0x2759c5, + 0x23fdc6, + 0x36a446, + 0x269f46, + 0x2d15c4, + 0x28ebc9, + 0x2966c6, + 0x3097ca, + 0x232148, + 0x31ef88, + 0x24104a, + 0x2421c5, + 0x2a9945, + 0x3def48, + 0x2ce108, + 0x23ae87, + 0x288686, + 0x33a388, + 0x20b607, + 0x28d208, + 0x2ca506, + 0x290f08, + 0x2a1dc6, + 0x287287, + 0x29ea46, + 0x2a80c6, + 0x2313ca, + 0x3dbf46, + 0x2e8949, + 0x2abb86, + 0x212f8a, + 0x24fb89, + 0x3024c6, + 0x2cd844, + 0x3222cd, + 0x28b4c7, + 0x39fe06, + 0x2d52c5, + 0x2c4945, + 0x394246, + 0x2ae109, + 0x369787, + 0x286386, + 0x393386, + 0x293ac9, + 0x2e37c4, + 0x3025c4, + 0x30f188, + 0x2fbec6, + 0x2adc88, + 0x216b08, + 0x260507, + 0x30e2c9, + 0x347a47, + 0x2c30ca, + 0x30334f, + 0x2396ca, + 0x20ac05, + 0x285bc5, + 0x216945, + 0x3cd547, + 0x204483, + 0x201fc8, + 0x2f6086, + 0x2f6189, + 0x2f5f86, + 0x2e0c87, + 0x2a5fc9, + 0x3d1f08, + 0x3c87c7, + 0x322d43, + 0x3596c5, + 0x204405, + 0x2d140b, + 0x287784, + 0x300084, + 0x282c86, + 0x322f07, + 0x3947ca, + 0x3b0887, + 0x29bf07, + 0x28b685, + 0x3de685, + 0x2934c9, + 0x2a80c6, + 0x3b070d, + 0x354745, + 0x2c7183, + 0x20dac3, + 0x258b05, + 0x33ac85, + 0x267608, + 0x286cc7, + 0x245246, + 0x2ab306, + 0x235745, + 0x23c847, + 0x3e4107, + 0x25d0c7, + 0x2b268a, + 0x39f608, + 0x2d15c4, + 0x28df87, + 0x28a847, + 0x35df46, + 0x2a1447, + 0x2ea808, + 0x35d588, + 0x27a306, + 0x3a9548, + 0x2cc904, + 0x372c46, + 0x266286, + 0x246046, + 0x2025c6, + 0x214ac4, + 0x291306, + 0x2d3e46, + 0x2a5386, + 0x224006, + 0x20d986, + 0x2ea646, + 0x245148, + 0x2c6648, + 0x2e5688, + 0x221408, + 0x3deec6, + 0x20f1c5, + 0x27b9c6, + 0x2bb345, + 0x397087, + 0x246005, + 0x217943, + 0x26a545, + 0x23b844, + 0x20dac5, + 0x223a03, + 0x2c4707, + 0x3aa1c8, + 0x2b64c6, + 0x2d630d, + 0x285b86, + 0x2a4905, + 0x2221c3, + 0x2d0ac9, + 0x2e3946, + 0x2a34c6, + 0x29c484, + 0x239647, + 0x2f43c6, + 0x303ac5, + 0x244243, + 0x211184, + 0x28aa06, + 0x219784, + 0x3c11c8, + 0x2064c9, + 0x369d09, + 0x2ada8a, + 0x2495cd, + 0x23e8c7, + 0x206986, + 0x21e5c4, + 0x293f89, + 0x2924c8, + 0x294546, + 0x242b06, + 0x2a1447, + 0x2c3b06, + 0x223686, + 0x3d0c06, + 0x3e30ca, + 0x21cf88, + 0x234885, + 0x245749, + 0x270b0a, + 0x33b008, + 0x2a8cc8, + 0x2a3448, + 0x3e450c, + 0x3996c5, + 0x2ab588, + 0x2ca006, + 0x29dac6, + 0x2dab47, + 0x3b0785, + 0x290285, + 0x369bc9, + 0x210b07, + 0x2f6145, + 0x2286c7, + 0x20dac3, + 0x2dc505, + 0x229108, + 0x2cc507, + 0x2a8b89, + 0x2e1dc5, + 0x30a804, + 0x31dc08, + 0x2cb5c7, + 0x3c8988, + 0x22aa48, + 0x392005, + 0x353b46, + 0x2ab406, + 0x30c689, + 0x266387, + 0x2bbb86, + 0x2585c7, + 0x215103, + 0x21eb44, + 0x2e6fc5, + 0x23c984, + 0x2526c4, + 0x28db87, + 0x2743c7, + 0x286544, + 0x2a89d0, + 0x333847, + 0x3de685, + 0x25084c, + 0x22a804, + 0x2c1108, + 0x287189, + 0x2be9c6, + 0x32f908, + 0x27a484, + 0x282f88, + 0x23a086, + 0x231248, + 0x2a93c6, + 0x2d2d4b, + 0x331b85, + 0x2e6e48, + 0x21a484, + 0x28f78a, + 0x2a8b89, + 0x29e946, + 0x21b6c8, + 0x2657c5, + 0x2d00c4, + 0x2c1006, + 0x25cf88, + 0x28d908, + 0x3356c6, + 0x36c104, + 
0x283446, + 0x347ac7, 0x285187, - 0x363306, - 0x29d487, - 0x2ebd88, - 0x3d8348, - 0x29c3c6, - 0x26b308, - 0x2c8204, - 0x2f34c6, - 0x250dc6, - 0x3d5486, - 0x208006, - 0x218e84, - 0x288106, - 0x2cf246, - 0x2a0386, - 0x24bc06, - 0x205886, - 0x2a7e46, - 0x23f2c8, - 0x2c2a48, - 0x2e1e88, - 0x367848, - 0x3cac06, - 0x210ec5, - 0x22d7c6, - 0x2b8845, - 0x399107, - 0x295d45, - 0x2119c3, - 0x2e5f45, - 0x235fc4, - 0x2059c5, - 0x202a43, - 0x3c4bc7, - 0x399d08, - 0x31a546, - 0x34490d, - 0x282146, - 0x29f945, - 0x219643, - 0x2cc149, - 0x288f06, - 0x23b1c6, - 0x3b2144, - 0x2b2287, - 0x3611c6, - 0x23f845, - 0x270483, - 0x20b344, - 0x285346, - 0x20e604, - 0x275548, - 0x204609, - 0x32e489, - 0x2a950a, - 0x29738d, - 0x23e587, - 0x3c2cc6, - 0x21dd04, - 0x287dc9, - 0x28e308, - 0x290086, - 0x23abc6, - 0x29d487, - 0x2c98c6, - 0x226c46, - 0x25dfc6, - 0x3da10a, - 0x223708, - 0x2ef705, - 0x356c09, - 0x2d75ca, - 0x30cd48, - 0x2a46c8, - 0x299fc8, - 0x2b45cc, - 0x395905, - 0x2a7148, - 0x2c2d46, - 0x2e1446, - 0x2d5707, - 0x24bc85, - 0x28ad05, - 0x32e349, - 0x214207, - 0x3556c5, - 0x2284c7, - 0x2059c3, - 0x2d7a85, - 0x224148, - 0x2d9047, - 0x2a4589, - 0x2e5145, - 0x311404, - 0x2ab1c8, - 0x2eed47, - 0x2ded88, - 0x2206c8, - 0x2b5285, - 0x21f746, - 0x2a6fc6, - 0x3c2909, - 0x250ec7, - 0x2b8cc6, - 0x355347, - 0x208683, - 0x34da84, - 0x2dc405, - 0x2343c4, - 0x24b684, - 0x38fc47, - 0x26da47, - 0x282b04, - 0x2a43d0, - 0x207bc7, - 0x3d5c05, - 0x3b3c8c, - 0x220484, - 0x31e048, - 0x283749, - 0x3d78c6, - 0x31fc48, - 0x27d804, - 0x27d808, - 0x231746, - 0x274208, - 0x2a38c6, - 0x39b90b, - 0x330685, - 0x2dc288, - 0x213684, - 0x28988a, - 0x2a4589, - 0x23b2c6, - 0x2c2f48, - 0x2592c5, - 0x2cb744, - 0x31df46, - 0x252148, - 0x289188, - 0x333e86, - 0x389f44, - 0x27dcc6, - 0x3ca007, - 0x281387, - 0x29d48f, - 0x346f07, - 0x2fda87, - 0x388ac5, - 0x377ac5, - 0x2aa609, - 0x2f7786, - 0x38fe85, - 0x28a887, - 0x2d5988, - 0x302545, - 0x23b3c6, - 0x30bf88, - 0x2f424a, - 0x37e648, - 0x293287, - 0x2ff2c6, - 0x356bc6, + 0x2a144f, + 0x346f47, + 0x395747, + 0x368945, + 0x3410c5, + 0x2ae749, + 0x2f8c06, + 0x28c7c5, + 0x28fe07, + 0x2de9c8, + 0x219805, + 0x29ea46, + 0x231f88, + 0x353fca, + 0x32c888, + 0x2980c7, + 0x303786, + 0x245706, 0x2003c3, - 0x20c483, - 0x2d7789, - 0x29afc9, - 0x2dca46, - 0x2e5145, - 0x2b4448, - 0x2c2f48, - 0x2a3508, - 0x25e04b, - 0x344b47, - 0x3211c9, - 0x29d708, - 0x3505c4, - 0x3d50c8, - 0x295909, - 0x2b8fc5, - 0x229647, - 0x34db05, - 0x289088, - 0x2983cb, - 0x29e790, - 0x2b3e05, - 0x2135cc, - 0x23f585, - 0x25e883, - 0x2b6486, - 0x2ce3c4, - 0x23b686, - 0x2a5407, - 0x203d44, - 0x243208, - 0x20770d, - 0x3224c5, - 0x23e5c4, - 0x2b5684, - 0x2b5689, + 0x211c03, + 0x270cc9, + 0x29f7c9, + 0x2bb806, + 0x2e1dc5, + 0x3a97c8, + 0x21b6c8, + 0x2a7d08, + 0x3d0c8b, + 0x2d6547, + 0x31d409, + 0x2a16c8, + 0x329f44, + 0x3e3a48, + 0x299dc9, + 0x2bbe85, + 0x3cd447, + 0x21ebc5, + 0x28d808, + 0x29bb4b, + 0x2a2c50, + 0x2b7845, + 0x21a3cc, + 0x245405, + 0x28b703, + 0x2b49c6, + 0x2d3004, + 0x27fb86, + 0x2a9a07, + 0x221404, + 0x24cf48, + 0x201e8d, + 0x342545, + 0x23e904, + 0x2b60c4, + 0x395489, + 0x2afa88, + 0x332047, + 0x23a108, + 0x28ec88, + 0x286685, + 0x3cd987, + 0x286607, + 0x2f6447, + 0x2759c9, + 0x3c3009, + 0x375346, + 0x21bf06, + 0x28fec6, + 0x31a6c5, + 0x3c7104, + 0x3cf306, + 0x3d9dc6, + 0x2866c8, + 0x20458b, + 0x2fba47, + 0x21e5c4, + 0x2f4306, + 0x2eab47, + 0x38c785, + 0x3a1b45, + 0x266844, + 0x3c2f86, + 0x3cf388, + 0x293f89, + 0x254986, + 0x2922c8, + 0x303b86, + 0x360fc8, + 0x3603cc, + 0x286546, + 0x2a45cd, + 0x2a4a4b, + 0x31d685, + 0x3e4247, + 
0x2cc986, + 0x3c5d88, + 0x3753c9, + 0x21d3c8, + 0x3de685, + 0x2893c7, + 0x28cc08, + 0x3c3709, + 0x2f4046, + 0x26af8a, + 0x3c5b08, + 0x21d20b, + 0x2d668c, + 0x283088, + 0x28a286, + 0x22de48, + 0x353c47, + 0x224e49, + 0x29b2cd, + 0x2a7fc6, + 0x3dc948, + 0x2c6509, + 0x2d16c8, + 0x291008, + 0x2d4c8c, + 0x2d5947, + 0x2d7887, + 0x275805, + 0x2c9987, + 0x2de888, + 0x2c1086, + 0x25480c, + 0x306808, + 0x2e1708, + 0x3cf646, + 0x327e47, + 0x375544, + 0x221408, + 0x29594c, + 0x243f8c, + 0x20ac85, + 0x2033c7, + 0x36c086, + 0x327dc6, + 0x39d8c8, + 0x224dc4, + 0x22fa8b, + 0x292e8b, + 0x303786, + 0x201d07, + 0x208805, + 0x27d905, + 0x22fbc6, + 0x265785, + 0x287745, + 0x2e0107, + 0x223a09, + 0x36a604, + 0x247245, + 0x30b005, + 0x3477c8, + 0x3a89c5, + 0x2d7089, + 0x3982c7, + 0x3982cb, + 0x302206, + 0x244e89, + 0x333088, + 0x2931c5, + 0x2f6548, + 0x3c3048, + 0x283d07, + 0x2455c7, + 0x28dc09, + 0x231187, + 0x29a109, + 0x2b984c, + 0x2bb808, + 0x2bf649, + 0x2c0787, + 0x28ed49, + 0x38fbc7, + 0x2d6788, + 0x3c1585, + 0x372bc6, + 0x2d5308, + 0x2fa4c8, + 0x2709c9, + 0x287787, + 0x27e305, + 0x207789, + 0x31f546, + 0x29b0c4, + 0x37f506, + 0x3a8fc8, + 0x23bc07, + 0x204788, + 0x3a9609, + 0x353907, + 0x2a7e46, + 0x3e3d84, + 0x26a5c9, + 0x3cd808, + 0x3cf507, + 0x291846, + 0x2044c6, + 0x3cfc04, + 0x2f3b46, + 0x207843, + 0x331709, + 0x331b46, + 0x2b7e85, + 0x2ab306, + 0x224185, + 0x28d088, + 0x205387, + 0x3c2786, + 0x333706, + 0x31ef88, + 0x2ae8c7, + 0x2a8005, + 0x2a87c8, + 0x3d4188, + 0x3c5b08, + 0x2452c5, + 0x372c46, + 0x369ac9, + 0x30c504, + 0x22400b, + 0x22338b, + 0x234789, + 0x20dac3, + 0x263745, + 0x2b62c6, + 0x246508, + 0x2fb584, + 0x2b64c6, + 0x2b27c9, + 0x3201c5, + 0x2e0046, + 0x2cb5c6, + 0x21b6c4, + 0x2a8e4a, + 0x2b7dc8, + 0x2fa4c6, + 0x371545, + 0x201b87, + 0x33aec7, + 0x353b44, + 0x2235c7, + 0x245fc4, + 0x245fc6, + 0x202003, + 0x2759c5, + 0x2bdc85, + 0x347188, + 0x28e145, + 0x286289, + 0x221247, + 0x22124b, + 0x2afd4c, + 0x2b034a, + 0x34ff07, + 0x20ac43, + 0x284b88, + 0x302585, + 0x219885, + 0x359784, + 0x2d6686, + 0x287186, + 0x2f3b87, + 0x26030b, + 0x214ac4, + 0x368a84, + 0x2bf884, + 0x2dfe06, + 0x221404, + 0x21b548, + 0x359585, + 0x249c05, + 0x2a7c47, + 0x3e4349, + 0x33ac85, + 0x39424a, + 0x2deb09, + 0x2ae3ca, + 0x3e3209, + 0x31c284, + 0x393445, + 0x2c3c08, + 0x3afdcb, + 0x30c2c5, + 0x216c86, + 0x24a884, + 0x2867c6, + 0x353789, + 0x2eac47, + 0x27af48, + 0x249946, + 0x347a47, + 0x28d908, + 0x3780c6, + 0x3e3e04, + 0x3b2687, + 0x384f05, + 0x396107, + 0x221484, + 0x2cc906, + 0x3aea88, + 0x2a4c08, + 0x32a647, + 0x306e08, + 0x2a1e85, + 0x20d904, + 0x240f48, + 0x29ae44, + 0x2168c5, + 0x3ae984, + 0x20b707, + 0x296787, + 0x28ee88, + 0x322f46, + 0x28e0c5, + 0x286088, + 0x32ca88, + 0x2ad9c9, + 0x223686, + 0x239b08, + 0x28f60a, + 0x38c808, + 0x318f85, + 0x27bbc6, 0x2adfc8, - 0x330b47, - 0x2317c8, - 0x289bc8, - 0x282c45, - 0x27ee47, - 0x282bc7, - 0x3559c7, - 0x26ff49, - 0x25e649, - 0x210706, - 0x302d46, - 0x28a946, - 0x326e85, - 0x3c5d04, - 0x3cc9c6, - 0x3d4e86, - 0x282c88, - 0x20d30b, - 0x237647, - 0x21dd04, - 0x361106, - 0x2ec0c7, - 0x2a7a45, - 0x324a85, - 0x267c04, - 0x25e5c6, - 0x3cca48, - 0x287dc9, - 0x261846, - 0x28e108, - 0x23f906, - 0x365f48, - 0x37904c, - 0x282b06, - 0x29f60d, - 0x29fa8b, - 0x2a6105, - 0x25ec87, - 0x2c8286, - 0x3bb788, - 0x210789, - 0x38a7c8, - 0x3d5c05, - 0x20fac7, - 0x288708, - 0x3c7c49, - 0x360e46, - 0x26174a, - 0x3bb508, - 0x38a60b, - 0x22398c, - 0x27d908, - 0x284906, - 0x27e848, - 0x2f3ec7, - 0x347049, - 0x35150d, - 0x29fec6, - 0x30ef48, - 0x2c2909, - 0x2ccd48, - 0x28bbc8, - 0x2cfb4c, - 
0x2d0807, - 0x2d31c7, - 0x26fd85, - 0x2c54c7, - 0x2d5848, - 0x31dfc6, - 0x2704cc, - 0x301fc8, - 0x2dd8c8, - 0x23ae06, - 0x2b1f07, - 0x210904, - 0x367848, - 0x28d20c, - 0x29144c, - 0x3e2345, - 0x3dcd47, - 0x389ec6, - 0x2b1e86, - 0x2bcb08, - 0x21b284, - 0x2671cb, - 0x28d94b, - 0x2ff2c6, - 0x207587, - 0x3572c5, - 0x2781c5, - 0x267306, - 0x259285, - 0x248a05, - 0x2d65c7, - 0x2b2789, - 0x273cc4, - 0x23d405, - 0x2f8ac5, - 0x358908, - 0x2bf505, - 0x2d1d09, - 0x39e2c7, - 0x39e2cb, - 0x2fd706, - 0x23f009, - 0x389d08, - 0x3ae7c5, - 0x355ac8, - 0x25e688, - 0x286407, - 0x2b5a87, - 0x38fcc9, - 0x274147, - 0x295c49, - 0x2d11cc, - 0x2dca48, - 0x2c0dc9, - 0x2c4d07, - 0x289c89, - 0x367207, - 0x223a88, - 0x358d45, - 0x2f3446, - 0x2d01c8, - 0x21c488, - 0x2d7489, - 0x248a47, - 0x278bc5, - 0x3cde49, - 0x2fde86, - 0x297b84, - 0x33ff06, - 0x26ad88, - 0x2e6587, - 0x20d508, - 0x26b3c9, - 0x3a1a87, - 0x2a3646, - 0x25ed44, - 0x2e5fc9, - 0x27ecc8, - 0x23acc7, - 0x2702c6, - 0x20d246, - 0x3473c4, - 0x26b5c6, - 0x205943, - 0x330209, - 0x330646, - 0x2a4905, - 0x2a6ec6, - 0x2db905, - 0x288b88, - 0x33f3c7, - 0x23bb46, - 0x25de86, - 0x31cd48, - 0x2aa787, - 0x29ff05, - 0x2a41c8, - 0x3b1b88, - 0x3bb508, - 0x23f445, - 0x2f34c6, - 0x32e249, - 0x3c2784, - 0x2db78b, - 0x22694b, - 0x2ef609, - 0x2059c3, - 0x257b05, - 0x2ef4c6, - 0x241f88, - 0x30a604, - 0x31a546, - 0x2ad8c9, - 0x2ce1c5, - 0x2d6506, - 0x2eed46, - 0x203f44, - 0x29a14a, - 0x2a4848, - 0x21c486, - 0x375c45, - 0x357147, - 0x33a2c7, - 0x21f744, - 0x226b87, - 0x2bffc4, - 0x369146, - 0x207883, - 0x26ff45, - 0x2ba485, - 0x25b688, - 0x287005, - 0x282849, - 0x2abc07, - 0x36768b, - 0x2abc0c, - 0x2ac20a, - 0x3513c7, - 0x203843, - 0x280d88, - 0x23f605, - 0x3025c5, - 0x35f284, - 0x223986, - 0x283746, - 0x26b607, - 0x3a9d8b, - 0x218e84, - 0x309d04, - 0x2d6784, - 0x2db206, - 0x203d44, - 0x21e488, - 0x35f085, - 0x21a245, - 0x2a3447, - 0x25ed89, - 0x33a085, - 0x39690a, - 0x2d5ac9, - 0x2aceca, - 0x3da249, - 0x354004, - 0x2cedc5, - 0x2c99c8, - 0x3aaacb, - 0x313005, - 0x2ecd46, - 0x241c04, - 0x282d86, - 0x3a1909, - 0x2ec1c7, - 0x33ef48, - 0x297706, - 0x3c9f87, - 0x289188, - 0x37c006, - 0x3d5e84, - 0x386b47, - 0x388705, - 0x398187, - 0x29f484, - 0x2c8206, - 0x30ca48, - 0x29fc48, - 0x33dec7, - 0x3801c8, - 0x29dec5, - 0x205804, - 0x377288, - 0x3802c4, - 0x21c345, - 0x30cc44, - 0x20ef87, - 0x291ac7, - 0x289dc8, - 0x2def06, - 0x286f85, - 0x282648, - 0x37e848, - 0x2a9449, - 0x226c46, - 0x2311c8, - 0x28970a, - 0x2a7ac8, - 0x308c85, - 0x22d9c6, - 0x2a9a48, - 0x20fb8a, - 0x265587, - 0x28e745, - 0x297d88, - 0x2b3a44, - 0x253786, - 0x2d3548, - 0x205886, - 0x33aa08, - 0x2d9e07, - 0x203686, - 0x2c9144, - 0x26a4c7, - 0x2c3304, - 0x3a18c7, - 0x23b00d, - 0x239945, - 0x2d8e4b, - 0x2916c6, - 0x252f48, - 0x2431c4, - 0x3c0706, - 0x285346, - 0x27eb87, - 0x29f2cd, - 0x305587, - 0x2c3b48, - 0x28bd45, - 0x296c88, - 0x2d6dc6, + 0x28948a, + 0x232b87, + 0x292905, 0x29df48, - 0x38ecc6, - 0x3b3a07, - 0x28a149, - 0x35fe07, - 0x290348, - 0x34c1c5, - 0x22bc48, - 0x2b1dc5, - 0x2d6d05, - 0x37d145, - 0x24dc03, - 0x208084, - 0x297f85, - 0x3a9a89, - 0x36ec46, - 0x2ebe88, - 0x2eefc5, - 0x2c5387, - 0x2e0fca, - 0x2d6449, - 0x2d540a, - 0x2e1f08, - 0x22830c, - 0x28a90d, - 0x314e43, - 0x33a908, - 0x20b305, - 0x2f4006, - 0x3ccfc6, - 0x2d2405, - 0x355449, - 0x348ec5, - 0x282648, - 0x258946, - 0x370286, - 0x2ab089, - 0x3b0b47, - 0x298686, - 0x2e0f48, - 0x3d5388, - 0x2efec7, - 0x2cf3ce, - 0x2d7005, - 0x3c7b45, - 0x205788, - 0x36f947, - 0x20d282, - 0x2cf804, - 0x23b58a, - 0x23ad88, - 0x25e7c6, - 0x2a1848, - 0x2a6fc6, - 0x25f708, - 
0x2b8cc8, - 0x30b3c4, - 0x2c5745, - 0x602284, - 0x602284, - 0x602284, - 0x207783, - 0x20d0c6, - 0x282b06, - 0x2a5dcc, - 0x202503, - 0x2d75c6, - 0x207844, - 0x288e88, - 0x2ad705, - 0x23b686, - 0x2cc888, - 0x2e3246, - 0x23bac6, - 0x203d48, - 0x2dc487, - 0x273f09, - 0x3df7ca, - 0x26dbc4, - 0x295d45, - 0x2b7645, - 0x2d9a86, - 0x23e5c6, - 0x2a5b46, - 0x3d3f06, - 0x274044, - 0x27404b, - 0x266cc4, - 0x23f185, - 0x2b7cc5, - 0x3aa046, - 0x209648, - 0x28a7c7, - 0x3305c4, - 0x213c03, - 0x2b3545, - 0x33fdc7, - 0x28a6cb, - 0x25b587, - 0x2cc788, - 0x2c5887, - 0x2715c6, - 0x25c348, - 0x2cad4b, - 0x34e2c6, - 0x214a09, - 0x2caec5, - 0x325343, - 0x2d6506, - 0x2d9d08, - 0x215203, - 0x2a11c3, - 0x289186, - 0x2a6fc6, - 0x379eca, - 0x284945, - 0x28518b, - 0x2a6e0b, - 0x2163c3, + 0x272e04, + 0x25ec86, + 0x2d7f88, + 0x20d986, + 0x3d55c8, + 0x237bc7, + 0x20cf06, + 0x2cd844, + 0x326687, + 0x2c6a84, + 0x353747, + 0x3cf84d, + 0x234805, + 0x2cc30b, + 0x244206, + 0x25e4c8, + 0x24cf04, + 0x26a146, + 0x28aa06, + 0x22e187, + 0x2a428d, + 0x3089c7, + 0x2c70c8, + 0x294145, + 0x219948, + 0x2dbac6, + 0x2a1f08, + 0x3e0d46, + 0x36c9c7, + 0x2e2d89, + 0x338587, + 0x294808, + 0x269145, + 0x2357c8, + 0x327d05, + 0x2334c5, + 0x379205, + 0x20af03, + 0x202644, + 0x245745, + 0x24b649, + 0x372a06, + 0x2ea908, + 0x289685, + 0x2c9847, + 0x2a90ca, + 0x2dff89, + 0x2d7bca, + 0x2e5708, + 0x22850c, + 0x28fe8d, + 0x31e443, + 0x3d54c8, + 0x211145, + 0x353d86, + 0x3d1d86, + 0x321f85, + 0x2586c9, + 0x30db85, + 0x286088, + 0x2648c6, + 0x36ad46, + 0x2af1c9, + 0x266e47, + 0x29be06, + 0x2a9048, + 0x245f48, + 0x2f0987, + 0x2e050e, + 0x2dbd05, + 0x3c3605, + 0x20d888, + 0x3a21c7, + 0x204502, + 0x2d4944, + 0x27fa8a, + 0x3cf5c8, + 0x3c3186, + 0x2a6108, + 0x2ab406, + 0x340f48, + 0x2bbb88, + 0x233484, + 0x2c9c05, + 0x70e7c4, + 0x70e7c4, + 0x70e7c4, + 0x201f03, + 0x204346, + 0x286546, + 0x2aa3cc, + 0x20cf43, + 0x270b06, + 0x201fc4, + 0x2e38c8, + 0x2b2605, + 0x27fb86, + 0x2d1208, + 0x2e6386, + 0x3c2706, + 0x29e748, + 0x2e7047, + 0x230f49, + 0x2f04ca, + 0x274544, + 0x246005, + 0x30a405, + 0x2e3646, + 0x23e906, + 0x2aa146, + 0x382006, + 0x231084, + 0x23108b, + 0x239644, + 0x201c05, + 0x2bab85, + 0x2605c6, + 0x20ec48, + 0x28fd47, + 0x331ac4, + 0x211583, + 0x272905, + 0x37f3c7, + 0x28fc4b, + 0x347087, + 0x2d1108, + 0x2c9d47, + 0x276a06, + 0x271fc8, + 0x2cf3cb, + 0x2cba46, + 0x212949, + 0x2cf545, + 0x322d43, + 0x2e0046, + 0x237ac8, + 0x215ec3, + 0x29acc3, + 0x28d906, + 0x2ab406, + 0x37604a, + 0x28a2c5, + 0x28a84b, + 0x2ab24b, + 0x217e03, + 0x209b03, + 0x2c3044, + 0x2e4887, + 0x237b44, + 0x29b2c4, + 0x2c9e84, + 0x38cb08, + 0x371488, + 0x211b89, + 0x2e3488, + 0x3a0087, + 0x224006, + 0x2ea54f, + 0x2dbe46, + 0x2e4dc4, + 0x3712ca, + 0x37f2c7, + 0x2c6b86, + 0x29b109, + 0x211b05, + 0x3472c5, + 0x211c46, + 0x235903, + 0x272e49, + 0x21d106, + 0x3a93c9, + 0x3947c6, + 0x2759c5, + 0x20b085, + 0x202643, + 0x2e49c8, + 0x332207, + 0x2f6084, + 0x2e3748, + 0x29d844, + 0x31ff46, + 0x2b49c6, + 0x247dc6, + 0x2e6d09, + 0x219805, + 0x2a80c6, + 0x25c0c9, + 0x2db246, + 0x2ea646, + 0x3abbc6, + 0x209085, + 0x3ae986, + 0x36c9c4, + 0x3c1585, + 0x2d5304, + 0x2c8f46, + 0x354704, + 0x201c03, + 0x292585, + 0x23d308, + 0x35a407, + 0x2fb609, + 0x292808, + 0x2a5891, + 0x2cb64a, + 0x3036c7, + 0x237d06, + 0x201fc4, + 0x2d5408, + 0x293688, + 0x2a5a4a, + 0x2d6e4d, + 0x216b86, + 0x29e846, + 0x326746, + 0x2ac3c7, + 0x2c7185, + 0x30d607, + 0x201e85, + 0x398404, + 0x3c2d46, + 0x2884c7, + 0x272b4d, + 0x2adf07, + 0x2bfb48, + 0x286389, + 0x27bac6, + 0x2f3fc5, + 0x2e9284, + 0x3a90c6, + 0x353a46, + 0x3cf746, + 
0x2a6988, + 0x22d503, + 0x21b783, + 0x32c105, + 0x322606, + 0x2bbb45, + 0x249b48, + 0x2a9bca, + 0x239084, + 0x2e38c8, + 0x2a3448, + 0x3a9947, + 0x37b249, + 0x2d0e08, + 0x294007, + 0x2d3fc6, + 0x20d98a, + 0x3a9148, + 0x31dac9, + 0x2afb48, + 0x222849, + 0x35d787, + 0x208f45, + 0x2ab806, + 0x2c0f08, + 0x281fc8, + 0x261d08, + 0x342688, + 0x201c05, + 0x200d04, + 0x23bec8, + 0x24a604, + 0x3e3004, + 0x2759c5, + 0x29d187, + 0x3e4109, + 0x22df87, + 0x226b05, + 0x282e86, + 0x370d46, + 0x20ed44, + 0x2af4c6, + 0x28ac84, + 0x3d4886, + 0x3e3ec6, + 0x215d06, + 0x3de685, + 0x249a07, + 0x20ac43, + 0x22bf49, + 0x31ed88, + 0x293e84, + 0x293e8d, + 0x2a4d08, + 0x3082c8, + 0x31da46, + 0x2e2e89, + 0x2dff89, + 0x353485, + 0x2a9cca, + 0x27cb0a, + 0x291a4c, + 0x291bc6, + 0x284786, + 0x2dc446, + 0x3a6ac9, + 0x353fc6, + 0x2ae906, + 0x30dc46, + 0x221408, + 0x306e06, + 0x2e43cb, + 0x29d305, + 0x249c05, + 0x285285, + 0x30ef06, + 0x20d943, + 0x247d46, + 0x2ade87, + 0x2d52c5, + 0x2f4ec5, + 0x2c4945, + 0x2f9646, + 0x336cc4, + 0x336cc6, + 0x2a3d09, + 0x30ed8c, + 0x398148, + 0x25cf04, + 0x39d6c6, + 0x244306, + 0x237ac8, + 0x21b6c8, + 0x30ec89, + 0x201b87, + 0x2fbc09, + 0x27d9c6, + 0x216b04, + 0x210484, + 0x28df04, + 0x28d908, + 0x3e3f4a, + 0x33ac06, + 0x368807, + 0x396387, + 0x244f85, + 0x2b9304, + 0x299d86, + 0x2c71c6, + 0x205483, + 0x31ebc7, + 0x22a948, + 0x3535ca, + 0x202148, + 0x273148, + 0x354745, + 0x237105, + 0x2fbb45, + 0x245346, + 0x246e46, + 0x316a45, + 0x331949, + 0x2b910c, + 0x34ca47, + 0x2a5ac8, + 0x282185, + 0x70e7c4, + 0x236b04, + 0x2cc644, + 0x3d0ac6, + 0x2ad28e, + 0x347347, + 0x2ac5c5, + 0x30c48c, + 0x29d707, + 0x288447, + 0x2c8989, + 0x21ae49, + 0x292905, + 0x31ed88, + 0x369ac9, + 0x3c59c5, + 0x2d5208, + 0x2c1f86, + 0x2411c6, + 0x24fb84, + 0x2aa648, 0x206743, - 0x2bff44, - 0x2e0e07, - 0x27d904, - 0x25ef44, - 0x2c2bc4, - 0x2a7dc8, - 0x375b88, - 0x20c409, - 0x2d98c8, - 0x37d3c7, - 0x24bc06, - 0x2ebacf, - 0x2d7146, - 0x2e15c4, - 0x3759ca, - 0x33fcc7, - 0x2c3406, - 0x297bc9, - 0x20c385, - 0x25b7c5, - 0x20c4c6, - 0x22bd83, - 0x2b3a89, - 0x223886, - 0x26b189, - 0x396e86, - 0x26ff45, - 0x361bc5, - 0x206643, - 0x3131c8, - 0x330d07, - 0x355604, - 0x288d08, - 0x2e11c4, - 0x31c046, - 0x2b6486, - 0x23d846, - 0x2dc149, - 0x302545, - 0x29ffc6, - 0x277389, - 0x2d6146, - 0x2a7e46, - 0x3a8b46, - 0x22e405, - 0x30cc46, - 0x3b3a04, - 0x358d45, - 0x21c484, - 0x2c45c6, - 0x273404, - 0x207a43, - 0x28e3c5, - 0x234f88, - 0x366a47, - 0x30a689, - 0x28e648, - 0x2a0951, - 0x2eedca, - 0x2ff207, - 0x3d8686, - 0x207844, + 0x203f44, + 0x272985, + 0x39b187, + 0x26a445, + 0x28f4c9, + 0x29b80d, + 0x2b2ec6, + 0x2115c4, + 0x288608, + 0x22384a, + 0x3e9847, + 0x2b0c45, + 0x203f83, + 0x2ab40e, + 0x2e4acc, + 0x33b107, + 0x2ad447, + 0x4d39a7c7, + 0x143386, + 0x27804, + 0x212fc3, + 0x354005, + 0x2cc645, + 0x2a64c8, + 0x2a3289, + 0x25ce06, + 0x237b44, + 0x303606, + 0x245d0b, + 0x2da2cc, + 0x257b47, + 0x2e4685, + 0x3d4088, + 0x2f0745, + 0x3712c7, + 0x2e2c47, + 0x2494c5, + 0x20d943, + 0x2abd84, + 0x288cc5, + 0x36a505, + 0x36a506, + 0x2a0088, + 0x2884c7, + 0x3d2086, + 0x3cfb06, + 0x379146, + 0x3dcac9, + 0x3cda87, + 0x25cc86, + 0x2da446, + 0x387046, + 0x2b7d05, + 0x218586, + 0x3b5545, + 0x3a8a48, + 0x29cdcb, + 0x2998c6, + 0x3963c4, + 0x222e09, + 0x221244, + 0x2c1f08, + 0x311007, + 0x290f04, 0x2d02c8, - 0x2e2e88, - 0x2a0b0a, - 0x2d1acd, - 0x2a9606, - 0x203e46, - 0x26a586, - 0x21a247, - 0x2c3c05, - 0x35c6c7, - 0x207705, - 0x39e404, - 0x206686, - 0x30ec47, - 0x2b378d, - 0x2a9987, - 0x344fc8, - 0x282949, - 0x22d8c6, - 0x360dc5, - 0x2393c4, - 0x26ae86, - 
0x21f646, - 0x23af06, - 0x2a20c8, - 0x22cdc3, - 0x23e443, - 0x34bcc5, - 0x2d2a86, - 0x2b8c85, - 0x297908, - 0x2a55ca, - 0x33f504, - 0x288e88, - 0x299fc8, - 0x25ef47, - 0x28ed49, - 0x2cc488, - 0x287e47, - 0x2c2e46, - 0x20588a, - 0x26af08, - 0x32df09, - 0x2ae088, - 0x224a09, - 0x3d8547, - 0x35ce45, - 0x2a73c6, - 0x31de48, - 0x2530c8, - 0x2bbfc8, - 0x21e608, - 0x23f185, - 0x200d04, - 0x233908, - 0x241984, - 0x3da044, - 0x26ff45, - 0x2997c7, - 0x25eb49, - 0x27e987, - 0x2260c5, - 0x27d706, - 0x375446, - 0x209744, - 0x2ab3c6, - 0x2855c4, - 0x293ec6, - 0x25e906, - 0x215046, - 0x3d5c05, - 0x2977c7, - 0x203843, - 0x22b509, - 0x31cb48, - 0x287cc4, - 0x287ccd, - 0x29fd48, - 0x2fcd48, - 0x32de86, - 0x28a249, - 0x2d6449, - 0x3a1605, - 0x2a56ca, - 0x2a844a, - 0x2b5c8c, - 0x2b5e06, - 0x280986, - 0x2d79c6, - 0x393189, - 0x2f4246, - 0x223b06, - 0x348f86, - 0x367848, - 0x37e646, - 0x2e094b, - 0x299945, - 0x21a245, - 0x281485, - 0x3b5146, - 0x205843, - 0x23d7c6, - 0x2a9907, - 0x2d0185, - 0x27fbc5, - 0x2c12c5, - 0x301c46, - 0x336144, - 0x336146, - 0x2a9e49, - 0x3b4fcc, - 0x39e148, - 0x2520c4, - 0x30c946, - 0x2917c6, - 0x2d9d08, - 0x2c2f48, - 0x3b4ec9, - 0x357147, - 0x237809, - 0x278286, - 0x22c544, - 0x20af04, - 0x286dc4, - 0x289188, - 0x25e98a, - 0x33a006, - 0x36eb07, - 0x398407, - 0x23f105, - 0x2b7604, - 0x2958c6, - 0x2c3c46, - 0x21b2c3, - 0x31c987, - 0x2205c8, - 0x3a174a, - 0x22e4c8, - 0x34df48, - 0x273445, - 0x2a6205, - 0x237745, - 0x23f4c6, - 0x242546, - 0x25d405, - 0x330449, - 0x2b740c, - 0x307d87, - 0x2a0b88, - 0x251045, - 0x602284, - 0x267c84, - 0x2d9184, - 0x212d06, - 0x2a8d0e, - 0x25b847, - 0x21a445, - 0x3c270c, - 0x3d2347, - 0x30ebc7, - 0x30f7c9, - 0x221a89, - 0x28e745, - 0x31cb48, - 0x32e249, - 0x3bb3c5, + 0x2d6984, + 0x2b7d44, + 0x293dc5, + 0x342586, + 0x38ca47, + 0x235f03, + 0x2a7f05, + 0x34bb44, + 0x3c3646, + 0x353508, + 0x32c785, + 0x29ca89, + 0x207985, + 0x3ca8c8, + 0x326c47, + 0x331c48, + 0x2cff07, + 0x395809, + 0x291186, + 0x397c06, + 0x29fa84, + 0x223f45, + 0x3151cc, + 0x285287, + 0x285a87, + 0x23e548, + 0x2b2ec6, + 0x2addc4, + 0x37ddc4, + 0x28da89, + 0x2dc546, + 0x293547, + 0x27b884, + 0x2af5c6, + 0x3de9c5, + 0x2e2ac7, + 0x2e4346, + 0x26ae49, + 0x2d2747, + 0x2a1447, + 0x2af046, + 0x291785, + 0x28c108, + 0x21cf88, + 0x36db46, + 0x32c7c5, + 0x2d8fc6, + 0x20d083, + 0x2a6349, + 0x2a9ece, + 0x2cfc48, + 0x29d948, + 0x36d94b, + 0x29ccc6, + 0x396284, + 0x28fa84, + 0x2a9fca, + 0x21a2c7, + 0x25cd45, + 0x212949, + 0x2d3f05, + 0x3e3047, + 0x29e6c4, + 0x206647, + 0x216a08, + 0x2c96c6, + 0x2cca89, + 0x2d0f0a, + 0x21a246, + 0x2a4846, + 0x2bab05, + 0x39c545, + 0x3b0e47, + 0x24f388, + 0x3de908, + 0x233486, + 0x20b105, + 0x23e68e, + 0x2d15c4, + 0x2a6445, + 0x282809, + 0x2f8a08, + 0x298006, + 0x2a82cc, + 0x2a97d0, + 0x2acecf, + 0x2ae648, + 0x34ff07, + 0x3de685, + 0x245745, + 0x38c8c9, + 0x29e149, + 0x283546, + 0x30c347, + 0x39d7c5, + 0x23ae89, + 0x35dfc6, + 0x353e0d, + 0x28ddc9, + 0x29b2c4, + 0x2cf6c8, + 0x23bf89, + 0x33adc6, + 0x284d85, + 0x397c06, + 0x27ae09, + 0x27b708, + 0x20f1c5, + 0x28f704, + 0x2a848b, + 0x33ac85, + 0x246586, + 0x2901c6, + 0x259446, + 0x3d0e8b, + 0x29cb89, + 0x225005, + 0x396f87, + 0x2cb5c6, + 0x25e646, + 0x28f388, + 0x2d40c9, + 0x2bf90c, + 0x37f1c8, + 0x31d186, + 0x3356c3, + 0x38adc6, + 0x302505, + 0x28b388, + 0x20ab06, + 0x3c8888, + 0x3b0905, + 0x267305, + 0x326d88, + 0x3b6ec7, + 0x3d1cc7, + 0x2f3b87, + 0x32f908, + 0x350048, + 0x2f1086, + 0x2c8d87, + 0x21ea07, + 0x39558a, + 0x210843, + 0x30ef06, + 0x23e605, + 0x27fa84, + 0x286389, + 0x395784, + 0x2c96c4, + 0x2a9444, + 0x2ad44b, + 
0x332147, + 0x23e8c5, + 0x2a1b88, + 0x282e86, + 0x282e88, + 0x28a206, + 0x298945, + 0x298b85, + 0x29a546, + 0x30e148, + 0x29b048, + 0x286546, + 0x2a19cf, + 0x2a5e10, + 0x20d485, + 0x20ac43, + 0x237405, + 0x31d348, + 0x29e049, + 0x3c5b08, + 0x30c148, + 0x25ae48, + 0x332207, + 0x282b49, + 0x3c8a88, + 0x2b3944, + 0x2a92c8, + 0x347889, + 0x2c9307, + 0x2bc7c4, + 0x22e048, + 0x2497ca, + 0x2ee946, + 0x216b86, + 0x223549, + 0x2a9a07, + 0x2e0b08, + 0x244808, + 0x3d01c8, + 0x2796c5, + 0x386605, + 0x249c05, + 0x2cc605, + 0x2c6347, + 0x20d945, + 0x2d52c5, + 0x386b86, + 0x3c5a47, + 0x3afd07, + 0x249ac6, + 0x2e5c45, + 0x246586, + 0x205405, + 0x2c1d88, + 0x2f9e44, + 0x2db2c6, + 0x351e44, 0x2d00c8, - 0x2c1006, - 0x377506, - 0x2463c4, - 0x294908, - 0x204883, - 0x20ccc4, - 0x2b35c5, - 0x39db87, - 0x2e5e45, - 0x2895c9, - 0x29664d, - 0x2af506, - 0x213c44, - 0x36ee08, - 0x2b25ca, - 0x2144c7, - 0x34bb05, - 0x20cd03, - 0x2a6fce, - 0x3132cc, - 0x30ce47, - 0x2a8ec7, - 0x4539cd47, - 0xb20c6, - 0x27c44, - 0x215d03, - 0x2f4285, - 0x2d9185, - 0x2a1c08, - 0x29edc9, - 0x251fc6, - 0x27d904, - 0x2ff146, - 0x2398cb, - 0x2eab4c, - 0x24dcc7, - 0x2e0c05, - 0x3b1a88, - 0x2efc85, - 0x3759c7, - 0x307b87, - 0x2475c5, - 0x205843, - 0x21fac4, - 0x2e6445, - 0x273bc5, - 0x273bc6, - 0x2a2608, - 0x30ec47, - 0x3cd2c6, - 0x3472c6, - 0x37d086, - 0x30f0c9, - 0x27ef47, - 0x251e46, - 0x2eacc6, - 0x3cae06, - 0x2b4385, - 0x20e046, - 0x3b3245, - 0x2bf588, - 0x29940b, - 0x295606, - 0x398444, - 0x305bc9, - 0x2abc04, - 0x2c0f88, - 0x3116c7, - 0x28bac4, - 0x2cb948, - 0x2d1604, - 0x2b43c4, - 0x27a345, - 0x322506, - 0x2a7d07, - 0x249b03, - 0x2a3705, - 0x2ff4c4, - 0x3c7b86, - 0x3a1688, - 0x37e545, - 0x2990c9, - 0x3513c5, - 0x323488, - 0x2bc807, - 0x330748, - 0x2cb587, - 0x2fdb49, - 0x287f86, - 0x372946, - 0x29b284, - 0x309c45, - 0x31520c, - 0x281487, - 0x282047, - 0x23e208, - 0x2af506, - 0x2a9844, - 0x34a144, - 0x38fb49, - 0x2d7ac6, - 0x296b47, - 0x27e7c4, - 0x2ab4c6, - 0x3c1685, - 0x2dea47, - 0x2e08c6, - 0x261609, - 0x39b307, - 0x29d487, - 0x2aaf06, - 0x270205, - 0x286b08, - 0x223708, - 0x371f86, - 0x37e585, - 0x2e93c6, - 0x203803, - 0x2a1a89, - 0x2a58ce, - 0x2cb2c8, - 0x2e12c8, - 0x371d8b, - 0x299306, - 0x398304, - 0x23bac4, - 0x2a59ca, - 0x2134c7, - 0x251f05, - 0x214a09, - 0x2cf305, - 0x3da087, - 0x232144, - 0x204787, - 0x322608, - 0x2d6c46, - 0x2c8389, - 0x2cc58a, - 0x213446, - 0x29f886, - 0x2b7c45, - 0x39f685, - 0x37d947, - 0x246d48, - 0x3c15c8, - 0x30b3c6, - 0x361c45, - 0x23e34e, - 0x2ccc44, - 0x2a1b85, - 0x27d089, - 0x2f7588, - 0x2931c6, - 0x2a3ccc, - 0x2a51d0, - 0x2a894f, - 0x2aa508, - 0x3513c7, - 0x3d5c05, - 0x297f85, - 0x2a7b89, - 0x297f89, - 0x27ddc6, - 0x313087, - 0x309b45, - 0x337c49, - 0x363386, - 0x2f408d, - 0x286c89, - 0x25ef44, - 0x2cb048, - 0x2339c9, - 0x33a1c6, - 0x280f85, - 0x372946, - 0x33ee09, - 0x27e648, - 0x210ec5, - 0x289804, - 0x2a3e8b, - 0x33a085, - 0x242006, - 0x28ac46, - 0x22a986, - 0x25e24b, - 0x2991c9, - 0x347205, - 0x399007, - 0x2eed46, - 0x233086, - 0x289488, - 0x30ed89, - 0x344d8c, - 0x33fbc8, - 0x31e806, - 0x333e83, - 0x360186, - 0x2b58c5, - 0x285cc8, - 0x3e21c6, - 0x2dec88, - 0x24be05, - 0x293f85, - 0x2c0b88, - 0x3d5247, - 0x3ccf07, - 0x26b607, - 0x31fc48, - 0x2d9b88, - 0x2e2d86, - 0x2c4407, - 0x34d947, - 0x2b578a, - 0x238643, - 0x3b5146, - 0x23e2c5, - 0x215cc4, - 0x282949, - 0x2fdac4, - 0x2cbd84, - 0x2a3944, - 0x2a8ecb, - 0x330c47, - 0x23e585, - 0x29dbc8, - 0x27d706, - 0x27d708, - 0x284886, - 0x294845, - 0x294b05, - 0x296086, - 0x2971c8, - 0x297b08, - 0x282b06, - 0x29da0f, - 0x2a1550, - 0x205385, - 0x203843, - 
0x22c605, - 0x321108, - 0x297e89, - 0x3bb508, - 0x312e88, - 0x385808, - 0x330d07, - 0x27d3c9, - 0x2dee88, - 0x2a4f84, - 0x2a37c8, - 0x3589c9, - 0x2c4a07, - 0x395d44, - 0x27ea48, - 0x29758a, - 0x2ff746, - 0x2a9606, - 0x226b09, - 0x2a5407, - 0x2dbfc8, - 0x2321c8, - 0x347988, - 0x259805, - 0x21ce85, - 0x21a245, - 0x2d9145, - 0x2c2747, - 0x205845, - 0x2d0185, - 0x203546, - 0x3bb447, - 0x3aaa07, - 0x297886, - 0x2e2445, - 0x242006, - 0x280e45, - 0x2c7d08, - 0x309ac4, - 0x2d61c6, - 0x353544, - 0x2cb748, - 0x32288a, - 0x28328c, - 0x2a6505, - 0x21a306, - 0x344f46, - 0x348d86, - 0x31e884, - 0x3cd585, - 0x284147, - 0x2a5489, - 0x2db647, - 0x602284, - 0x602284, - 0x330ac5, - 0x2dfe04, - 0x2a328a, - 0x27d586, - 0x2c0b04, - 0x3dccc5, - 0x2c1d85, - 0x2c3b44, - 0x28a887, - 0x3cdfc7, - 0x2db208, - 0x2e94c8, - 0x210ec9, - 0x388d08, - 0x29048b, - 0x2a7cc4, - 0x233185, - 0x38ff05, - 0x26b589, - 0x30ed89, - 0x305ac8, - 0x368f48, - 0x2e6bc4, - 0x291805, - 0x204083, - 0x2d9a45, - 0x2a0046, - 0x29ec0c, - 0x21f546, - 0x280e86, - 0x293445, - 0x301cc8, - 0x2eadc6, - 0x3d8806, - 0x2a9606, - 0x22e24c, - 0x38ffc4, - 0x37d1ca, - 0x293388, - 0x29ea47, - 0x2ff3c6, - 0x252087, - 0x2fed45, - 0x2702c6, - 0x363d86, - 0x377987, - 0x2cc284, - 0x20f085, - 0x27d084, - 0x39e487, - 0x27d2c8, - 0x28080a, - 0x288587, - 0x2ac487, - 0x351347, - 0x2efdc9, - 0x29ec0a, - 0x22c503, - 0x366a05, - 0x215083, - 0x2c2c09, - 0x2d9f48, - 0x388ac7, - 0x3bb609, - 0x223806, - 0x358e08, - 0x3c4b45, - 0x37e94a, - 0x2079c9, - 0x29c289, - 0x2d5707, - 0x2e2f89, - 0x214f48, - 0x25f906, - 0x21a4c8, - 0x27ff07, - 0x274147, - 0x2d5ac7, - 0x2dd548, - 0x30c7c6, - 0x297345, - 0x284147, - 0x29f388, - 0x37d004, - 0x306dc4, - 0x298587, - 0x2b9047, - 0x32e0ca, - 0x25f886, - 0x3c82ca, - 0x2cf747, - 0x2cca07, - 0x20f144, - 0x295d04, - 0x2de946, - 0x361444, - 0x36144c, - 0x311605, - 0x21c2c9, - 0x2f0e84, - 0x2c3c05, - 0x2b2548, - 0x297bc5, - 0x396906, - 0x2980c4, - 0x2ab98a, - 0x384806, - 0x24774a, - 0x3da407, - 0x20d645, - 0x22bd85, - 0x23f14a, - 0x247685, - 0x2a7b46, - 0x241984, - 0x2c00c6, - 0x37da05, - 0x3e2286, - 0x33decc, - 0x2e3cca, - 0x2a8544, - 0x24bc06, - 0x2a5407, - 0x2e0844, - 0x367848, - 0x2ecc46, - 0x398289, - 0x2cd009, - 0x2dcb49, - 0x2db946, - 0x280006, - 0x21a607, - 0x330388, - 0x27fe09, - 0x330c47, - 0x29dd46, - 0x3ca007, - 0x26a445, - 0x2ccc44, - 0x21a1c7, - 0x34db05, - 0x28f645, + 0x2db3ca, + 0x286ccc, + 0x2aaa05, + 0x2ac486, + 0x2bfac6, + 0x3b6806, + 0x31d204, + 0x3df285, + 0x289ac7, + 0x2a9a89, + 0x2e03c7, + 0x70e7c4, + 0x70e7c4, + 0x331fc5, + 0x312a84, + 0x2a7a8a, + 0x282d06, + 0x3698c4, + 0x203345, + 0x2c5405, + 0x2c70c4, + 0x28fe07, + 0x207907, + 0x2dfe08, + 0x2d90c8, + 0x20f1c9, + 0x29ae48, + 0x29494b, + 0x239704, + 0x29eb45, + 0x28c845, + 0x2f3b09, + 0x2d40c9, + 0x222d08, + 0x3d8dc8, + 0x2605c4, + 0x244345, + 0x20dec3, + 0x2e3605, + 0x2a8146, + 0x2a30cc, + 0x21d006, + 0x284c86, + 0x298285, + 0x2f96c8, + 0x2dac86, + 0x237e86, + 0x216b86, + 0x22b48c, + 0x27d4c4, + 0x37928a, + 0x2981c8, + 0x2a2f07, + 0x34ba46, + 0x25cec7, + 0x303205, + 0x291846, + 0x35ecc6, + 0x3728c7, + 0x2d0c04, + 0x20b805, + 0x282804, + 0x398487, + 0x282a48, + 0x28460a, + 0x28ca87, + 0x2b7907, + 0x34fe87, + 0x2f0889, + 0x2a30ca, + 0x208fc3, + 0x35a3c5, + 0x215d43, + 0x2c9ec9, + 0x36cc48, + 0x368947, + 0x3c5c09, + 0x21d086, + 0x3c1648, + 0x2c4685, + 0x32cb8a, + 0x20df09, + 0x27a1c9, + 0x2dab47, + 0x293789, + 0x215c08, + 0x3e3c46, + 0x2ac648, + 0x2f5207, + 0x231187, + 0x2deb07, + 0x2cdf88, + 0x39ab06, + 0x249585, + 0x289ac7, + 0x2a4348, + 0x3790c4, + 0x309684, + 0x29bd07, + 
0x2bbf07, + 0x36994a, + 0x3e3bc6, + 0x3ce54a, + 0x2d4887, + 0x2d1387, + 0x20b8c4, + 0x29a1c4, + 0x2e29c6, + 0x2f4644, + 0x2f464c, + 0x310f45, + 0x216849, + 0x3caa44, + 0x2c7185, + 0x2237c8, + 0x27a545, + 0x394246, + 0x29e284, + 0x2a6d0a, + 0x2dddc6, + 0x3501ca, + 0x3e33c7, + 0x2048c5, + 0x235905, + 0x244fca, + 0x281f05, + 0x2ada86, + 0x24a604, + 0x2c31c6, + 0x3b0f05, + 0x20abc6, + 0x32a64c, + 0x22b70a, + 0x27cc04, + 0x224006, + 0x2a9a07, + 0x2e42c4, + 0x221408, + 0x2ed306, + 0x396209, + 0x3dd0c9, + 0x2bb909, + 0x2241c6, + 0x2f5306, + 0x2ac787, + 0x331888, + 0x2f5109, + 0x332147, + 0x2a1d06, + 0x347ac7, + 0x326605, + 0x2d15c4, + 0x2ac347, + 0x21ebc5, + 0x293d05, 0x200cc7, - 0x247488, - 0x3b1a06, - 0x2a01cd, - 0x2a1e0f, - 0x2a6e0d, - 0x226104, - 0x235086, - 0x2e4088, - 0x348f45, - 0x2b5948, - 0x2862ca, - 0x25ef44, - 0x239b06, - 0x211787, - 0x218e87, - 0x2dc549, - 0x21a485, - 0x2c3b44, - 0x2c568a, - 0x2cc049, - 0x2e3087, - 0x30d406, - 0x33a1c6, - 0x291746, - 0x386c06, - 0x2e398f, - 0x2e3f49, - 0x37e646, - 0x38f786, - 0x32fa49, - 0x2c4507, - 0x220d03, - 0x22e3c6, - 0x20c483, - 0x2d22c8, - 0x2b0e07, - 0x2aa709, - 0x2b6308, - 0x3cd048, - 0x367346, - 0x21f489, - 0x307cc5, - 0x2a3504, - 0x35cf07, - 0x393205, - 0x226104, - 0x23e648, - 0x213784, - 0x2c4247, - 0x399c86, - 0x26c5c5, - 0x2ae088, - 0x33a08b, - 0x31d047, - 0x23f3c6, - 0x2d71c4, - 0x3aef86, - 0x26ff45, - 0x34db05, - 0x286889, - 0x28a489, - 0x274184, - 0x2741c5, - 0x24bc45, - 0x37e7c6, - 0x31cc48, - 0x2ce7c6, - 0x22040b, - 0x3d774a, - 0x2cb685, - 0x294b86, - 0x25b285, - 0x3c2205, - 0x256147, - 0x3b53c8, - 0x237804, - 0x385406, - 0x297b86, - 0x215107, - 0x325304, - 0x285346, - 0x229845, - 0x229849, - 0x280204, - 0x2b7789, - 0x282b06, - 0x2d08c8, - 0x24bc45, - 0x398505, - 0x3e2286, - 0x344c89, - 0x221a89, - 0x280f06, - 0x2f7688, - 0x296788, - 0x25b244, - 0x2c6604, - 0x2c6608, - 0x3326c8, - 0x237909, - 0x29ffc6, - 0x2a9606, - 0x33964d, - 0x31a546, - 0x378f09, - 0x201f45, - 0x20c4c6, - 0x347b08, - 0x336085, - 0x34d984, - 0x26ff45, - 0x289fc8, - 0x2a3049, - 0x27d144, - 0x2c8206, - 0x29c4ca, - 0x30cd48, - 0x32e249, - 0x270bca, - 0x3bb586, - 0x2a1fc8, - 0x375785, - 0x293608, - 0x2fedc5, - 0x2236c9, - 0x33bc49, - 0x21fb82, - 0x2caec5, - 0x277f06, - 0x282a47, - 0x215cc5, - 0x33eb86, - 0x319508, - 0x2af506, - 0x2c9889, - 0x282146, - 0x289308, - 0x24ef85, - 0x394886, - 0x3b3b08, - 0x289188, - 0x3d8448, - 0x31b948, - 0x20e044, - 0x21f783, - 0x2c9ac4, - 0x288786, - 0x26a484, - 0x2e1207, - 0x3d8709, - 0x2d6785, - 0x2321c6, - 0x22e3c6, - 0x2a244b, - 0x2c3346, - 0x273686, - 0x2d62c8, - 0x266cc6, - 0x20d443, - 0x20bb03, - 0x2ccc44, - 0x2310c5, - 0x23f747, - 0x27d2c8, - 0x27d2cf, - 0x28404b, - 0x31ca48, - 0x2c8286, - 0x31cd4e, - 0x23f583, - 0x23f6c4, - 0x2c32c5, - 0x2c39c6, - 0x2959cb, - 0x299886, - 0x30c009, - 0x26c5c5, - 0x249a48, - 0x209bc8, - 0x22194c, - 0x2a8f06, - 0x2d9a86, - 0x2e5145, - 0x290108, - 0x283285, - 0x3505c8, - 0x2a404a, - 0x2a7249, - 0x602284, + 0x249388, + 0x3d4006, + 0x2a51cd, + 0x2a66cf, + 0x2ab24d, + 0x223444, + 0x23d406, + 0x2e7b48, + 0x30dc05, + 0x245488, + 0x283bca, + 0x29b2c4, + 0x2c67c6, + 0x215307, + 0x214ac7, + 0x2e7109, + 0x2ac605, + 0x2c70c4, + 0x2c9b4a, + 0x2d09c9, + 0x293887, + 0x2a5486, + 0x33adc6, + 0x244286, + 0x3b2746, + 0x2e69cf, + 0x2e7a09, + 0x306e06, + 0x267246, + 0x20c049, + 0x2c8e87, + 0x201543, + 0x209046, + 0x211c03, + 0x321e48, + 0x26a007, + 0x2ae849, + 0x2b4848, + 0x3d1e08, + 0x2878c6, + 0x225a89, + 0x34c985, + 0x2a7d04, + 0x209007, + 0x3a6b45, + 0x223444, + 0x23e988, + 0x21a584, + 0x2c8bc7, + 0x3aa146, + 
0x23fe85, + 0x2afb48, + 0x33ac8b, + 0x31f287, + 0x245246, + 0x2dbec4, + 0x3da006, + 0x2759c5, + 0x21ebc5, + 0x28be89, + 0x28fa09, + 0x2311c4, + 0x231205, + 0x224045, + 0x32ca06, + 0x31ee88, + 0x2d3506, + 0x22a78b, + 0x2be84a, + 0x2d0005, + 0x298c06, + 0x238d85, + 0x386ac5, + 0x2a35c7, + 0x30f188, + 0x29aec4, + 0x34ae06, + 0x29b0c6, + 0x215dc7, + 0x322d04, + 0x28aa06, + 0x3cd645, + 0x3cd649, + 0x2f5504, + 0x30a549, + 0x286546, + 0x2d5a08, + 0x224045, + 0x396485, + 0x20abc6, + 0x2bf809, + 0x21ae49, + 0x284d06, + 0x2f8b08, + 0x29b948, + 0x238d44, + 0x2ca304, + 0x2ca308, + 0x39ff08, + 0x2fbd09, + 0x2a80c6, + 0x216b86, + 0x33a24d, + 0x2b64c6, + 0x360289, + 0x30e485, + 0x211c46, + 0x22e2c8, + 0x336c05, + 0x21ea44, + 0x2759c5, + 0x28f088, + 0x2a7849, + 0x2828c4, + 0x2cc906, + 0x27a40a, + 0x33b008, + 0x369ac9, + 0x27600a, + 0x3c5b86, + 0x2a6888, + 0x371085, + 0x298448, + 0x303285, + 0x21cf49, + 0x33ca09, + 0x234702, + 0x2cf545, + 0x28c906, + 0x286487, + 0x2b0dc5, + 0x34b946, + 0x319908, + 0x2b2ec6, + 0x2c3ac9, + 0x285b86, + 0x28f208, + 0x2b8cc5, + 0x24e406, + 0x36cac8, + 0x28d908, + 0x35d688, + 0x31b648, + 0x218584, + 0x20c8c3, + 0x2c3d04, + 0x28cc86, + 0x326644, + 0x29d887, + 0x237d89, + 0x2db645, + 0x244806, + 0x209046, + 0x29fecb, + 0x2c6ac6, + 0x20c886, + 0x2de708, + 0x361b46, + 0x2046c3, + 0x212403, + 0x2d15c4, + 0x239a05, + 0x3039c7, + 0x282a48, + 0x282a4f, + 0x2899cb, + 0x31ec88, + 0x2cc986, + 0x31ef8e, + 0x20abc3, + 0x303944, + 0x2c6a45, + 0x2c6f46, + 0x299e8b, + 0x29d246, + 0x232009, + 0x23fe85, + 0x251748, + 0x20e288, + 0x21ad0c, + 0x2ad486, + 0x2e3646, + 0x2e1dc5, + 0x2945c8, + 0x286cc5, + 0x329f48, + 0x2a864a, + 0x2ab689, + 0x70e7c4, 0x2000c2, - 0x4b212402, + 0x53216542, 0x200382, - 0x20e704, - 0x20b982, - 0x217544, - 0x203182, - 0x5803, + 0x2b1b84, + 0x201582, + 0x28d4c4, + 0x204cc2, + 0xd903, 0x2003c2, - 0x208502, - 0xae888, - 0x4cc4, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x7542, - 0x4b202, - 0x23cb03, - 0x217fc3, - 0x23e083, - 0x1fcc2, - 0x4642, - 0x72c2, - 0x24ac43, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x20e704, - 0x217fc3, - 0x23e083, - 0x219ac3, - 0x24cd44, - 0x22ea43, - 0x236704, - 0x233fc3, - 0x2e5904, - 0x266a83, - 0x215f87, - 0x23cb03, - 0x205803, - 0x321388, - 0x23e083, - 0x293b0b, - 0x2ffec3, - 0x243bc6, - 0x22dc42, - 0x2fa00b, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x23e083, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x23e083, - 0x221d43, - 0x210cc3, + 0x202b02, + 0x793c8, + 0xe804, + 0x216543, + 0x222bc3, + 0x343b43, + 0x87c2, + 0x54202, + 0x216443, + 0x2296c3, + 0x20cb83, + 0x2a042, + 0x6502, + 0x4a42, + 0x253c43, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2b1b84, + 0x2296c3, + 0x20cb83, + 0x20f7c3, + 0x25dd04, + 0x216543, + 0x23ec84, + 0x222bc3, + 0x2e8fc4, + 0x343b43, + 0x2b1087, + 0x216443, + 0x20d903, + 0x2c2308, + 0x20cb83, + 0x29fc4b, + 0x304443, + 0x24d906, + 0x213402, + 0x2fe64b, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x20cb83, + 0x216543, + 0x222bc3, + 0x343b43, + 0x20cb83, + 0x21b103, + 0x2072c3, 0x2000c2, - 0xae888, - 0x334f05, - 0x34db88, - 0x2f4fc8, - 0x212402, - 0x36a4c5, - 0x3ca147, - 0x2031c2, - 0x243407, + 0x793c8, + 0x235ec5, + 0x21ec48, + 0x3585c8, + 0x216542, + 0x363605, + 0x347c07, + 0x202bc2, + 0x24d147, 0x200382, - 0x253d47, - 0x23a489, - 0x272888, - 0x347809, - 0x210382, - 0x3d5f47, - 0x32ad04, - 0x3ca207, - 0x3d7647, - 0x25a642, - 0x23cb03, - 0x20a942, - 0x203182, + 0x25be87, + 0x34c049, + 0x277f48, + 0x3d0049, + 0x214182, + 0x20e107, + 0x387c84, + 0x347cc7, + 0x2be747, + 0x2687c2, + 0x216443, + 0x203742, + 0x204cc2, 0x2003c2, - 0x205b42, + 0x208402, 
0x200902, - 0x208502, - 0x2e1a45, - 0x227885, - 0x12402, - 0x33fc3, - 0x22ea43, - 0x233fc3, - 0x27e883, - 0x266a83, - 0x204903, - 0x217fc3, - 0x23e083, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x23e083, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x23cb03, - 0x217fc3, - 0x1c0443, - 0x23e083, - 0xfe83, + 0x202b02, + 0x2e5245, + 0x227445, + 0x16542, + 0x22bc3, + 0x216543, + 0x222bc3, + 0x22de83, + 0x343b43, + 0x20e443, + 0x2296c3, + 0x20cb83, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x20cb83, + 0x216543, + 0x222bc3, + 0x343b43, + 0x158286, + 0x55fa5a4b, + 0x216443, + 0x2296c3, + 0x7ca83, + 0x20cb83, + 0x175285, + 0x12b83, 0x101, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x20e704, - 0x2191c3, - 0x217fc3, - 0x1c0443, - 0x23e083, - 0x217c83, - 0x4e4b1706, - 0x22383, - 0xd7405, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x23e083, - 0x212402, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x1c0443, - 0x23e083, - 0x5242, - 0xae888, - 0x12f603, - 0x5803, - 0x1c0443, - 0x46d04, - 0x147b604, - 0xf0085, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2b1b84, + 0x243543, + 0x2296c3, + 0x7ca83, + 0x20cb83, + 0x2203c3, + 0x56869306, + 0x20a83, + 0x70945, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x20cb83, + 0x216542, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x7ca83, + 0x20cb83, + 0x6482, + 0x793c8, + 0x38c43, + 0xd903, + 0x7ca83, + 0x4f344, + 0x1480c44, + 0xf0b45, 0x2000c2, - 0x3993c4, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x247e03, - 0x22f845, - 0x2191c3, - 0x21e1c3, - 0x217fc3, - 0x24dfc3, - 0x23e083, - 0x208503, - 0x24cdc3, - 0x20aa43, + 0x397344, + 0x216543, + 0x222bc3, + 0x343b43, + 0x24cc43, + 0x2b96c5, + 0x243543, + 0x21b283, + 0x2296c3, + 0x257743, + 0x20cb83, + 0x202b03, + 0x2192c3, + 0x201643, + 0x11d783, 0x5c2, - 0x30242, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x23e083, + 0x386c2, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x20cb83, 0x2000c2, - 0x24ac43, - 0x212402, - 0xf982, - 0x233fc3, - 0x266a83, - 0x20e704, - 0x217fc3, - 0x23e083, - 0x208502, - 0xae888, - 0x266a83, - 0x1c0443, - 0xae888, - 0x1c0443, - 0x276243, - 0x22ea43, - 0x2319c4, - 0x233fc3, - 0x266a83, - 0x209582, - 0x23cb03, - 0x217fc3, - 0x5803, - 0x23e083, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x209582, - 0x215f83, - 0x217fc3, - 0x23e083, - 0x2f8e43, - 0x208503, + 0x253c43, + 0x216542, + 0x3242, + 0x222bc3, + 0x343b43, + 0x2b1b84, + 0x2296c3, + 0x20cb83, + 0x202b02, + 0x793c8, + 0x343b43, + 0x7ca83, + 0x793c8, + 0x7ca83, + 0x2cc803, + 0x216543, + 0x23a304, + 0x222bc3, + 0x343b43, + 0x2042c2, + 0x216443, + 0x2296c3, + 0xd903, + 0x20cb83, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2042c2, + 0x233243, + 0x2296c3, + 0x20cb83, + 0x2faf43, + 0x202b03, 0x2000c2, - 0x212402, - 0x266a83, - 0x217fc3, - 0x23e083, - 0x243bc5, - 0x1375c6, - 0x24cd44, - 0x22dc42, + 0x216542, + 0x343b43, + 0x2296c3, + 0x20cb83, + 0x24d905, + 0x1143c6, + 0x6ff44, + 0x329c4, + 0x25dd04, + 0x213402, 0x882, - 0xae888, - 0xf982, - 0x4b202, - 0x2a82, + 0x793c8, + 0x3242, + 0x54202, + 0x2a02, 0x2000c2, - 0x146bc5, - 0x1ae08, - 0x125203, - 0x212402, - 0x3c904, - 0x52d16f86, - 0x1384, - 0xc634b, - 0x3a806, - 0x7f3c7, - 0x1431c9, - 0x233fc3, - 0x49e88, - 0x49e8b, - 0x4a30b, - 0x4a9cb, - 0x4ad0b, - 0x4afcb, - 0x4b40b, - 0x1cb86, - 0x266a83, - 0xf48c5, - 0x2044, - 0x20ef43, - 0x11b787, - 0xe88c4, - 0x722c4, - 0x217fc3, - 0x81006, - 0x1583c4, - 0x1c0443, - 0x23e083, - 0x300ac4, - 0x131247, - 0x1371c9, - 0xc6108, - 0x1a2584, - 0x1ca344, - 0x134c46, - 0xff48, - 0x1480c5, - 0x124e89, - 0xe783, - 0x146bc5, - 0x212402, - 0x22ea43, - 
0x233fc3, - 0x266a83, - 0x23cb03, - 0x205803, - 0x23e083, - 0x2ffec3, - 0x22dc42, - 0xae888, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x20e703, - 0x21e484, - 0x217fc3, - 0x5803, - 0x23e083, - 0x22ea43, - 0x233fc3, - 0x2e5904, - 0x266a83, - 0x217fc3, - 0x23e083, - 0x243bc6, - 0x233fc3, - 0x266a83, - 0xf443, - 0x1c0443, - 0x23e083, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x217fc3, - 0x23e083, - 0x146bc5, - 0x7f3c7, - 0x15c3, - 0xe783, - 0xae888, - 0x266a83, - 0x22ea43, - 0x233fc3, - 0x266a83, - 0x612c3, - 0x217fc3, - 0x23e083, - 0x5622ea43, - 0x233fc3, - 0x217fc3, - 0x23e083, - 0xae888, + 0x146c05, + 0x24948, + 0xe9883, + 0x216542, + 0x45c44, + 0x5b910646, + 0x1db84, + 0xc5e4b, + 0x42746, + 0x1cdf07, + 0x174bc9, + 0x222bc3, + 0x53188, + 0x5318b, + 0x5360b, + 0x539cb, + 0x53d0b, + 0x53fcb, + 0x5440b, + 0x18c86, + 0x343b43, + 0x154645, + 0x10e584, + 0x20b6c3, + 0x11b487, + 0x133604, + 0xed184, + 0x77984, + 0x2296c3, + 0x84e06, + 0xac8c4, + 0x7ca83, + 0x20cb83, + 0x305504, + 0x132747, + 0x113fc9, + 0xc5c08, + 0x1c8dc4, + 0x147e04, + 0x179dc3, + 0x13906, + 0x12248, + 0x18d445, + 0x1a1f49, + 0x39fc3, + 0x13ea86, + 0x146c05, + 0x216542, + 0x216543, + 0x222bc3, + 0x343b43, + 0x216443, + 0x20d903, + 0x20cb83, + 0x304443, + 0x213402, + 0x793c8, + 0x216543, + 0x222bc3, + 0x343b43, + 0x26a7c3, + 0x21b544, + 0x2296c3, + 0xd903, + 0x20cb83, + 0x216543, + 0x222bc3, + 0x2e8fc4, + 0x343b43, + 0x2296c3, + 0x20cb83, + 0x24d906, + 0x222bc3, + 0x343b43, + 0x1e803, + 0x7ca83, + 0x20cb83, + 0x216543, + 0x222bc3, + 0x343b43, + 0x2296c3, + 0x20cb83, + 0x146c05, + 0x1cdf07, + 0x69c3, + 0x39fc3, + 0x793c8, + 0x343b43, + 0x216543, + 0x222bc3, + 0x343b43, + 0x722c3, + 0x2296c3, + 0x20cb83, + 0x5ee16543, + 0x222bc3, + 0x2296c3, + 0x20cb83, + 0x793c8, 0x2000c2, - 0x212402, - 0x22ea43, - 0x266a83, - 0x217fc3, + 0x216542, + 0x216543, + 0x343b43, + 0x2296c3, 0x2003c2, - 0x23e083, - 0x33c187, - 0x355d4b, - 0x211843, - 0x27da88, - 0x330107, - 0x229dc6, - 0x2d42c5, - 0x36a609, + 0x20cb83, + 0x33cf47, + 0x2f67cb, + 0x2153c3, + 0x283208, + 0x331607, + 0x349506, + 0x234d45, + 0x363749, + 0x24d688, + 0x37e649, + 0x3ae5d0, + 0x37e64b, + 0x3aaa09, + 0x2069c3, + 0x2fed09, + 0x23b286, + 0x23b28c, + 0x235f88, + 0x3e5c48, + 0x35c449, + 0x2cd20e, + 0x34be0b, + 0x2c340c, + 0x203b43, + 0x279d4c, + 0x203b49, + 0x300187, + 0x23c4cc, + 0x2c024a, + 0x21d684, + 0x21d68d, + 0x279c08, + 0x20f7cd, + 0x28a5c6, + 0x25dd0b, + 0x314149, + 0x2674c7, + 0x32cdc6, + 0x3339c9, + 0x35310a, + 0x30a088, + 0x304044, + 0x2bc2c7, + 0x24ea07, + 0x202744, + 0x2208c4, + 0x209cc9, + 0x30d489, + 0x20a888, + 0x2303c5, + 0x2140c5, + 0x20f086, + 0x21d549, + 0x283e4d, + 0x216d88, + 0x20ef87, + 0x234dc8, + 0x25b186, + 0x3e11c4, + 0x26b385, + 0x3e2f06, + 0x3e7984, + 0x203a47, + 0x20588a, + 0x216784, + 0x21a186, + 0x21a989, + 0x21a98f, + 0x21cc4d, + 0x21e706, + 0x224550, + 0x224946, + 0x226187, + 0x227f07, + 0x227f0f, + 0x229309, + 0x22cc86, + 0x22e907, + 0x22e908, + 0x22ed09, + 0x206e88, + 0x317a87, + 0x20c903, + 0x391906, + 0x37a588, + 0x2cd4ca, + 0x204189, + 0x22cb43, + 0x363506, + 0x34ac4a, + 0x282647, + 0x2fffca, + 0x31034e, + 0x229446, + 0x3d6847, + 0x24b006, + 0x203c06, + 0x38640b, + 0x218d8a, + 0x2f6e0d, + 0x2f53c7, + 0x274588, + 0x274589, + 0x27458f, + 0x2fb78c, + 0x2fb0c9, + 0x287bce, + 0x2b118a, + 0x20c606, + 0x2e9106, + 0x30cd4c, + 0x3bb08c, + 0x3d8388, + 0x338487, + 0x208c45, + 0x347e84, + 0x36430e, + 0x3109c4, + 0x3406c7, + 0x36be4a, + 0x3806d4, + 0x38a70f, + 0x2280c8, + 0x3917c8, + 0x38c34d, + 0x38c34e, + 0x3b4bc9, + 0x238308, + 0x23830f, + 0x23c1cc, + 0x23c1cf, + 
0x23d147, + 0x23f90a, + 0x240acb, + 0x241348, + 0x244547, + 0x24be0d, + 0x362546, + 0x21d846, + 0x247bc9, + 0x26bac8, + 0x24db08, + 0x24db0e, + 0x26b907, + 0x308585, + 0x24f085, + 0x220744, + 0x3497c6, + 0x20a788, + 0x3a2983, + 0x2bf30e, + 0x24c1c8, + 0x3e534b, + 0x3c7807, + 0x2332c5, + 0x279ec6, + 0x2ba1c7, + 0x33e8c8, + 0x32c449, + 0x23cb05, + 0x2925c8, + 0x22fe06, + 0x3b3cca, + 0x364209, + 0x23c589, + 0x23c58b, + 0x33b7c8, + 0x202609, + 0x230486, + 0x3c298a, + 0x2a104a, + 0x23fb0c, + 0x371707, + 0x277d4a, + 0x39f78b, + 0x39f799, + 0x351a48, + 0x24d985, + 0x24bfc6, + 0x296489, + 0x250206, + 0x22b24a, + 0x2163c6, + 0x232d44, + 0x2dce0d, + 0x32d187, + 0x232d49, + 0x252185, + 0x2522c8, + 0x252f49, + 0x254744, + 0x254e07, + 0x254e08, + 0x255287, + 0x273b48, + 0x25cac7, + 0x2dfac5, + 0x26420c, + 0x2646c9, + 0x3b930a, + 0x266cc9, + 0x2fee09, + 0x26700c, + 0x26974b, + 0x26ac88, + 0x26bcc8, + 0x26f504, + 0x290988, + 0x291d49, + 0x2c0307, + 0x21abc6, + 0x2a9607, + 0x3d4bc9, + 0x21070b, + 0x247047, + 0x21fc47, + 0x3e3507, + 0x20f744, + 0x20f745, + 0x2e8cc5, + 0x357f0b, + 0x30df44, + 0x3b6648, + 0x25974a, + 0x22fec7, + 0x3e5007, + 0x299452, + 0x3d4786, + 0x239c86, + 0x340ace, + 0x3e5786, + 0x29ddc8, + 0x29f2cf, + 0x20fb88, 0x243948, - 0x381049, - 0x3ac290, - 0x38104b, - 0x215589, - 0x2015c3, - 0x2fa6c9, - 0x232646, - 0x23264c, - 0x334fc8, - 0x3dde48, - 0x26eac9, - 0x2c8b0e, - 0x23a24b, - 0x2c030c, - 0x233f03, - 0x284e8c, - 0x3e13c9, - 0x238447, - 0x233f0c, - 0x2bde8a, - 0x241ec4, - 0x38aa8d, - 0x284d48, - 0x219acd, - 0x292146, - 0x24cd4b, - 0x337349, - 0x38fa07, - 0x25f0c6, - 0x323849, - 0x35484a, - 0x30e748, - 0x2ffac4, - 0x3a8e87, - 0x3c0807, - 0x208184, - 0x2221c4, - 0x3b4809, - 0x35c549, - 0x3e1f48, - 0x2f2c85, - 0x2102c5, - 0x209a86, - 0x38a949, - 0x28654d, - 0x2ece48, - 0x209987, - 0x2d4348, - 0x26bdc6, - 0x22fa44, - 0x2a4d45, - 0x3d9f46, - 0x3dc104, - 0x3e12c7, - 0x204e8a, - 0x210e04, - 0x213386, - 0x214689, - 0x21468f, - 0x214c4d, - 0x215ac6, - 0x21aa10, - 0x21ae06, - 0x21b507, - 0x21bcc7, - 0x21bccf, - 0x21c689, - 0x224c46, - 0x225047, - 0x225048, - 0x225449, - 0x20f708, - 0x306a07, - 0x22b743, - 0x22e8c6, - 0x239148, - 0x2c8dca, - 0x20cf09, - 0x243a83, - 0x36a3c6, - 0x38524a, - 0x2fbb87, - 0x23828a, - 0x316c8e, - 0x21c7c6, - 0x32bc47, - 0x38ef46, - 0x243fc6, - 0x21cc8b, - 0x3a038a, - 0x35638d, - 0x2800c7, - 0x26dc08, - 0x26dc09, - 0x26dc0f, - 0x30a80c, - 0x265209, - 0x2bbbce, - 0x21608a, - 0x20dac6, - 0x3076c6, - 0x31a1cc, - 0x3df50c, - 0x325908, - 0x35fd07, - 0x39d4c5, - 0x3ca3c4, - 0x25fd0e, - 0x3ae384, - 0x37dd87, - 0x3a6d8a, - 0x3d7cd4, - 0x3db78f, - 0x21be88, - 0x22e788, - 0x39124d, - 0x39124e, - 0x22ed49, - 0x22fe88, - 0x22fe8f, - 0x233c0c, - 0x233c0f, - 0x234dc7, - 0x23718a, - 0x23874b, - 0x2395c8, - 0x23b807, - 0x260b4d, - 0x369646, - 0x38ac46, - 0x23d649, - 0x252848, - 0x243dc8, - 0x243dce, - 0x2bb387, - 0x305145, - 0x246ac5, - 0x206484, - 0x22a086, - 0x3e1e48, - 0x324643, - 0x2e8c8e, - 0x260f08, - 0x2acacb, - 0x276407, - 0x30b205, - 0x269c06, - 0x2b6ec7, - 0x321848, - 0x37d749, - 0x3d2cc5, - 0x28e408, - 0x228a46, - 0x3addca, - 0x25fc09, - 0x233fc9, - 0x233fcb, - 0x25c6c8, - 0x208049, - 0x2f2d46, - 0x2041ca, - 0x29d08a, - 0x23738c, - 0x375e07, - 0x27268a, - 0x331b0b, - 0x331b19, - 0x353148, - 0x243c45, - 0x260d06, - 0x211d89, - 0x3b2c46, - 0x22170a, - 0x275246, - 0x2d8384, - 0x2d838d, - 0x3b4447, - 0x368889, - 0x249285, - 0x2493c8, - 0x249c49, - 0x24bb44, - 0x24c247, - 0x24c248, - 0x24c507, - 0x26c188, - 0x251c87, - 0x2daec5, - 0x25828c, - 0x258749, - 0x31d28a, - 0x3b09c9, - 
0x2fa7c9, - 0x38f54c, - 0x25accb, - 0x25c8c8, - 0x261448, - 0x264f04, - 0x28b548, - 0x28cb89, - 0x2bdf47, - 0x2148c6, - 0x2a3b07, - 0x2a0f49, - 0x354d4b, - 0x20b187, - 0x348647, - 0x3da547, - 0x219a44, - 0x219a45, - 0x2e5605, - 0x35e84b, - 0x349284, - 0x328308, - 0x30234a, - 0x228b07, - 0x3d0347, - 0x295192, - 0x293dc6, - 0x231346, - 0x34890e, - 0x294586, - 0x299e48, - 0x29aacf, - 0x219e88, - 0x28fb88, - 0x2df5ca, - 0x2df5d1, - 0x2ab64e, - 0x2550ca, - 0x2550cc, - 0x230087, - 0x230090, - 0x3d4f08, - 0x2ab845, - 0x2b71ca, - 0x3dc14c, - 0x29e08d, - 0x204906, - 0x204907, - 0x20490c, - 0x209d8c, - 0x2191cc, - 0x2c204b, - 0x3923c4, - 0x226c84, - 0x2ba749, - 0x34a1c7, - 0x382f89, - 0x29cec9, - 0x2bdb47, - 0x2bdd06, - 0x2bdd09, - 0x2be103, - 0x2af60a, - 0x323a87, - 0x3ca70b, - 0x35620a, - 0x32ad84, - 0x3c88c6, - 0x288809, - 0x3612c4, - 0x2e164a, - 0x2e2845, - 0x2cd7c5, - 0x2cd7cd, - 0x2cdb0e, - 0x2c9c05, - 0x33ae46, - 0x2437c7, - 0x2525ca, - 0x3ae686, - 0x381c04, - 0x35a607, - 0x2fc70b, - 0x26be87, - 0x2699c4, - 0x253306, - 0x25330d, - 0x2e724c, - 0x217e86, - 0x2ed04a, - 0x223486, - 0x220dc8, - 0x274707, - 0x2d5eca, - 0x2361c6, - 0x27ffc3, - 0x2f4b46, - 0x238fc8, - 0x37204a, - 0x2df007, - 0x2df008, + 0x2e75ca, + 0x2e75d1, + 0x2af74e, + 0x20294a, + 0x20294c, + 0x238507, + 0x238510, + 0x3d9e48, + 0x2af945, + 0x2ba4ca, + 0x3e79cc, + 0x2a204d, + 0x20e446, + 0x20e447, + 0x20e44c, + 0x20f3cc, + 0x26a98c, + 0x39304b, + 0x3a4f04, + 0x205604, + 0x2be009, + 0x37de47, + 0x361f89, + 0x2a0e89, + 0x2bff07, + 0x2c00c6, + 0x2c00c9, + 0x2c04c3, + 0x2b2fca, + 0x37a447, + 0x37b94b, + 0x2f6c8a, 0x25bfc4, - 0x295707, - 0x2fdf08, - 0x293fc8, - 0x2c30c8, - 0x33e14a, - 0x2ee705, - 0x2ee987, - 0x254f13, - 0x271146, - 0x20bc08, - 0x222a49, - 0x2432c8, - 0x3673cb, - 0x3cd3c8, - 0x2cab84, - 0x2c0c86, - 0x325e06, - 0x322349, - 0x2d5d07, - 0x258388, - 0x2a5c46, + 0x3ceb46, + 0x28cd09, + 0x2f44c4, + 0x2e4e4a, + 0x302645, + 0x2d1b85, + 0x2d1b8d, + 0x2d1ece, + 0x2722c5, + 0x33bc06, + 0x24d507, + 0x25d40a, + 0x231c86, + 0x37ee04, + 0x301887, + 0x300e4b, + 0x273847, + 0x2420c4, + 0x316546, + 0x31654d, + 0x2eba8c, + 0x3d1a86, + 0x216f8a, + 0x221d46, + 0x227bc8, + 0x2fd2c7, + 0x2dafca, + 0x3e7346, + 0x28aa83, + 0x354806, + 0x213448, + 0x36dc0a, + 0x25aa07, + 0x25aa08, + 0x2985c4, + 0x2a5c47, + 0x31f5c8, + 0x2f3cc8, + 0x2f1188, + 0x32a8ca, + 0x2efe85, + 0x2cb207, + 0x260e13, + 0x276586, + 0x38d188, + 0x22bb49, + 0x24d008, + 0x28794b, + 0x2ca108, + 0x2eb7c4, + 0x326e86, + 0x324186, + 0x3423c9, + 0x2dae07, + 0x264308, + 0x2aa246, 0x200bc4, - 0x3d5d85, - 0x3aa788, - 0x248e8a, - 0x2d8008, - 0x2dcf86, - 0x2a21ca, - 0x273d48, - 0x2e0648, - 0x2e18c8, - 0x2e2106, - 0x2e4286, - 0x3b048c, - 0x2e4810, - 0x2b8a85, - 0x219c88, - 0x21e910, - 0x219c90, - 0x3ac10e, - 0x3b010e, - 0x3b0114, - 0x3b934f, - 0x3b9706, - 0x3b6051, - 0x208253, - 0x2086c8, - 0x25f245, - 0x27dfc8, - 0x3a7c45, - 0x34aacc, - 0x22b989, - 0x3ae1c9, - 0x317147, - 0x237bc9, - 0x3b2807, - 0x33a486, - 0x2a4b47, - 0x202cc5, - 0x20fec3, - 0x20f443, - 0x215bc4, - 0x3dff8d, - 0x20bf4f, + 0x3de805, + 0x33f188, + 0x39000a, + 0x2dca88, + 0x2e1046, + 0x2a6a8a, + 0x36a688, + 0x3bc6c8, + 0x2e50c8, + 0x2e5906, + 0x2e7d46, + 0x3b20cc, + 0x2e8310, + 0x2e8705, + 0x20f988, + 0x288910, + 0x20f990, + 0x3ae44e, + 0x3b1d4e, + 0x3b1d54, + 0x3ba5cf, + 0x3ba986, + 0x202811, + 0x209613, + 0x32cf48, + 0x363c05, + 0x283748, + 0x32d685, + 0x348fcc, + 0x2718c9, + 0x310809, + 0x2fbfc7, + 0x368f49, + 0x3a8747, + 0x313386, + 0x26b187, + 0x2649c5, + 0x212bc3, + 0x21e803, + 0x2433c4, + 0x21574d, + 0x3c3dcf, 
 	[… several thousand interleaved "-"/"+" lines of regenerated hex constants in the generated nodes table elided …]
 }
 
 // children is the list of nodes' children, the parent's wildcard bit and the
@@ -9614,32 +9781,33 @@ var children = [...]uint32{
 	0x40000000,
 	0x50000000,
 	0x60000000,
 	[… remaining interleaved "-"/"+" lines of regenerated hex constants in the children array elided …]
0x48011ed, - 0x4889200, - 0x48c5222, - 0x4915231, - 0x498d245, - 0x64991263, - 0x64995264, - 0x64999265, - 0x4a15266, - 0x4a71285, - 0x4aed29c, - 0x4b652bb, - 0x4be52d9, - 0x4c512f9, - 0x4d7d314, - 0x4dd535f, - 0x64dd9375, - 0x4e71376, - 0x4e7939c, - 0x24e7d39e, - 0x4f0539f, - 0x4f513c1, - 0x4fb93d4, - 0x50613ee, - 0x5129418, - 0x519144a, - 0x52a5464, - 0x652a94a9, - 0x652ad4aa, - 0x53094ab, - 0x53654c2, - 0x53f54d9, - 0x54714fd, - 0x54b551c, - 0x559952d, - 0x55cd566, - 0x562d573, - 0x56a158b, - 0x57295a8, - 0x57695ca, - 0x57d95da, - 0x657dd5f6, - 0x58055f7, - 0x5809601, - 0x5839602, - 0x585560e, - 0x5899615, - 0x58a9626, - 0x58c162a, - 0x5939630, - 0x594164e, - 0x595d650, - 0x5971657, - 0x598d65c, - 0x59b9663, - 0x59bd66e, - 0x59c566f, - 0x59d9671, - 0x59f9676, - 0x5a0967e, - 0x5a15682, - 0x5a51685, - 0x5a59694, - 0x5a6d696, - 0x5a9569b, - 0x5aa16a5, - 0x5aa96a8, - 0x5ad16aa, - 0x5af56b4, - 0x5b0d6bd, - 0x5b116c3, - 0x5b196c4, - 0x5b2d6c6, - 0x5bd56cb, - 0x5bd96f5, + 0x3010bfd, + 0x23014c04, + 0x2301cc05, + 0x3020c07, + 0x3048c08, + 0x305cc12, + 0x30d0c17, + 0x30dcc34, + 0x30e0c37, + 0x3100c38, + 0x3118c40, + 0x311cc46, + 0x3130c47, + 0x3148c4c, + 0x3168c52, + 0x3180c5a, + 0x3188c60, + 0x31a4c62, + 0x31c0c69, + 0x31c4c70, + 0x31f0c71, + 0x3210c7c, + 0x3230c84, + 0x3298c8c, + 0x32b8ca6, + 0x32d8cae, + 0x32dccb6, + 0x32f4cb7, + 0x3338cbd, + 0x33b8cce, + 0x33f4cee, + 0x33f8cfd, + 0x3404cfe, + 0x3424d01, + 0x3428d09, + 0x344cd0a, + 0x3454d13, + 0x3494d15, + 0x34e8d25, + 0x34ecd3a, + 0x34f0d3b, + 0x35e4d3c, + 0x235ecd79, + 0x235f0d7b, + 0x235f4d7c, + 0x35f8d7d, + 0x235fcd7e, + 0x23600d7f, + 0x23604d80, + 0x3608d81, + 0x2360cd82, + 0x2361cd83, + 0x23620d87, + 0x23624d88, + 0x23628d89, + 0x2362cd8a, + 0x23638d8b, + 0x2363cd8e, + 0x3654d8f, + 0x3678d95, + 0x3698d9e, + 0x3d0cda6, + 0x23d10f43, + 0x23d14f44, + 0x23d18f45, + 0x23d1cf46, + 0x3d2cf47, + 0x3d4cf4b, + 0x3f0cf53, + 0x3fdcfc3, + 0x404cff7, + 0x40a5013, + 0x418d029, + 0x41e5063, + 0x4221079, + 0x431d088, + 0x43e90c7, + 0x44810fa, + 0x4511120, + 0x4575144, + 0x47ad15d, + 0x48651eb, + 0x4931219, + 0x497d24c, + 0x4a0525f, + 0x4a41281, + 0x4a91290, + 0x4b092a4, + 0x64b0d2c2, + 0x64b112c3, + 0x64b152c4, + 0x4b912c5, + 0x4bed2e4, + 0x4c692fb, + 0x4ce131a, + 0x4d61338, + 0x4dcd358, + 0x4ef9373, + 0x4f513be, + 0x64f553d4, + 0x4fed3d5, + 0x4ff53fb, + 0x24ff93fd, + 0x50813fe, + 0x50cd420, + 0x5135433, + 0x51dd44d, + 0x52a5477, + 0x530d4a9, + 0x54214c3, + 0x65425508, + 0x65429509, + 0x548550a, + 0x54e1521, + 0x5571538, + 0x55ed55c, + 0x563157b, + 0x571558c, + 0x57495c5, + 0x57a95d2, + 0x581d5ea, + 0x58a5607, + 0x58e5629, + 0x5955639, + 0x65959655, + 0x5981656, + 0x5985660, + 0x59b5661, + 0x59d166d, + 0x5a15674, + 0x5a25685, + 0x5a3d689, + 0x5ab568f, + 0x5abd6ad, + 0x5ad96af, + 0x5aed6b6, + 0x5b116bb, + 0x25b156c4, + 0x5b416c5, + 0x5b456d0, + 0x5b4d6d1, + 0x5b616d3, + 0x5b816d8, + 0x5b916e0, + 0x5b9d6e4, + 0x5bd96e7, 0x5bdd6f6, - 0x5be16f7, - 0x5c056f8, - 0x5c29701, - 0x5c4570a, - 0x5c59711, - 0x5c6d716, - 0x5c7571b, - 0x5c7d71d, - 0x5c8571f, - 0x5c9d721, - 0x5cad727, - 0x5cb172b, - 0x5ccd72c, - 0x6555733, - 0x658d955, - 0x65b9963, - 0x65d596e, - 0x65f5975, - 0x661597d, - 0x6659985, - 0x6661996, - 0x26665998, - 0x26669999, - 0x667199a, - 0x687199c, - 0x26875a1c, - 0x6879a1d, - 0x2687da1e, - 0x2688da1f, - 0x26895a23, - 0x268a1a25, - 0x68a5a28, - 0x268a9a29, - 0x268b1a2a, - 0x68b9a2c, - 0x68c9a2e, - 0x68f1a32, - 0x692da3c, - 0x6931a4b, - 0x6969a4c, - 0x698da5a, - 0x74e5a63, - 0x74e9d39, - 0x74edd3a, - 0x274f1d3b, - 0x74f5d3c, - 0x274f9d3d, - 0x74fdd3e, 
- 0x27509d3f, - 0x750dd42, - 0x7511d43, - 0x27515d44, - 0x7519d45, - 0x27521d46, - 0x7525d48, - 0x7529d49, - 0x27539d4a, - 0x753dd4e, - 0x7541d4f, - 0x7545d50, - 0x7549d51, - 0x2754dd52, - 0x7551d53, - 0x7555d54, - 0x7559d55, - 0x755dd56, - 0x27565d57, - 0x7569d59, - 0x756dd5a, - 0x7571d5b, - 0x27575d5c, - 0x7579d5d, - 0x27581d5e, - 0x27585d60, - 0x75a1d61, - 0x75b9d68, - 0x75fdd6e, - 0x7601d7f, - 0x7625d80, - 0x7631d89, - 0x7635d8c, - 0x7639d8d, - 0x77fdd8e, - 0x27801dff, - 0x27809e00, - 0x2780de02, - 0x27811e03, - 0x7819e04, - 0x78f5e06, - 0x27901e3d, - 0x27905e40, - 0x27909e41, - 0x2790de42, - 0x7911e43, - 0x793de44, - 0x7949e4f, - 0x794de52, - 0x7971e53, - 0x797de5c, - 0x799de5f, - 0x79a1e67, - 0x79d9e68, - 0x7c89e76, - 0x7d45f22, - 0x7d49f51, - 0x7d4df52, - 0x7d61f53, - 0x7d65f58, - 0x7d99f59, - 0x7dd1f66, - 0x27dd5f74, - 0x7df1f75, - 0x7e19f7c, - 0x7e1df86, - 0x7e41f87, - 0x7e5df90, - 0x7e85f97, - 0x7e95fa1, - 0x7e99fa5, - 0x7e9dfa6, - 0x7ed5fa7, - 0x7ee1fb5, - 0x7f09fb8, - 0x7f95fc2, - 0x27f99fe5, - 0x7f9dfe6, - 0x7fadfe7, - 0x27fb1feb, - 0x7fc1fec, - 0x7fddff0, - 0x7ffdff7, - 0x8001fff, - 0x8016000, - 0x802a005, - 0x802e00a, - 0x803200b, - 0x803600c, - 0x805600d, - 0x80fe015, - 0x810203f, - 0x811e040, - 0x8146047, - 0x28156051, - 0x815a055, - 0x8166056, - 0x8192059, - 0x819a064, - 0x81ae066, - 0x81ce06b, - 0x81ea073, - 0x81fa07a, - 0x821207e, - 0x824a084, - 0x824e092, - 0x8322093, - 0x83260c8, - 0x833a0c9, - 0x83420ce, - 0x835a0d0, - 0x835e0d6, - 0x836a0d7, - 0x83760da, - 0x837a0dd, - 0x83820de, - 0x83860e0, - 0x83aa0e1, - 0x83ea0ea, - 0x83ee0fa, - 0x840e0fb, - 0x845e103, - 0x848e117, - 0x28492123, - 0x849a124, - 0x84f2126, - 0x84f613c, - 0x84fa13d, - 0x84fe13e, - 0x854213f, - 0x8552150, - 0x8592154, - 0x8596164, - 0x85c6165, - 0x870e171, - 0x87361c3, - 0x876e1cd, - 0x87961db, - 0x2879e1e5, - 0x287a21e7, - 0x287a61e8, - 0x87ae1e9, - 0x87ba1eb, - 0x88d61ee, - 0x88e2235, - 0x88ee238, - 0x88fa23b, - 0x890623e, - 0x8912241, - 0x891e244, - 0x892a247, - 0x893624a, - 0x894224d, - 0x894e250, - 0x895a253, - 0x8966256, - 0x8972259, - 0x897a25c, - 0x898625e, - 0x8992261, - 0x899e264, - 0x89aa267, - 0x89b626a, - 0x89c226d, - 0x89ce270, - 0x89da273, - 0x89e6276, - 0x89f2279, - 0x89fe27c, - 0x8a2a27f, - 0x8a3628a, - 0x8a4228d, - 0x8a4e290, - 0x8a5a293, - 0x8a66296, - 0x8a6e299, - 0x8a7a29b, - 0x8a8629e, - 0x8a922a1, - 0x8a9e2a4, - 0x8aaa2a7, - 0x8ab62aa, - 0x8ac22ad, - 0x8ace2b0, - 0x8ada2b3, - 0x8ae62b6, - 0x8af22b9, - 0x8afa2bc, - 0x8b062be, - 0x8b0e2c1, - 0x8b1a2c3, - 0x8b262c6, - 0x8b322c9, - 0x8b3e2cc, - 0x8b4a2cf, + 0x5be56f7, + 0x5bf96f9, + 0x5c216fe, + 0x5c2d708, + 0x5c3570b, + 0x5c5d70d, + 0x5c81717, + 0x5c99720, + 0x5c9d726, + 0x5ca5727, + 0x5cad729, + 0x5cc172b, + 0x5d71730, + 0x5d7575c, + 0x5d7d75d, + 0x5d8175f, + 0x5da5760, + 0x5dc9769, + 0x5de5772, + 0x5df9779, + 0x5e0d77e, + 0x5e15783, + 0x5e1d785, + 0x5e25787, + 0x5e3d789, + 0x5e4d78f, + 0x5e51793, + 0x5e6d794, + 0x66f579b, + 0x672d9bd, + 0x67599cb, + 0x67759d6, + 0x67799dd, + 0x2677d9de, + 0x679d9df, + 0x67bd9e7, + 0x68019ef, + 0x6809a00, + 0x2680da02, + 0x26811a03, + 0x6819a04, + 0x6a35a06, + 0x6a49a8d, + 0x26a4da92, + 0x6a51a93, + 0x6a59a94, + 0x26a5da96, + 0x26a61a97, + 0x26a6da98, + 0x26a7da9b, + 0x26a85a9f, + 0x26a91aa1, + 0x6a95aa4, + 0x26a99aa5, + 0x26ab1aa6, + 0x26ab9aac, + 0x26abdaae, + 0x26ac5aaf, + 0x26ac9ab1, + 0x26acdab2, + 0x26ad5ab3, + 0x6addab5, + 0x6af1ab7, + 0x6b19abc, + 0x6b55ac6, + 0x6b59ad5, + 0x6b91ad6, + 0x6bb5ae4, + 0x770daed, + 0x7711dc3, + 0x7715dc4, + 0x27719dc5, + 0x771ddc6, + 0x27721dc7, + 0x7725dc8, 
+ 0x27731dc9, + 0x7735dcc, + 0x7739dcd, + 0x2773ddce, + 0x7741dcf, + 0x27749dd0, + 0x774ddd2, + 0x7751dd3, + 0x27761dd4, + 0x7765dd8, + 0x7769dd9, + 0x776ddda, + 0x7771ddb, + 0x27775ddc, + 0x7779ddd, + 0x777ddde, + 0x7781ddf, + 0x7785de0, + 0x2778dde1, + 0x7791de3, + 0x7795de4, + 0x7799de5, + 0x2779dde6, + 0x77a1de7, + 0x277a9de8, + 0x277addea, + 0x77c9deb, + 0x77e1df2, + 0x7825df8, + 0x7829e09, + 0x784de0a, + 0x7861e13, + 0x7865e18, + 0x7869e19, + 0x7a2de1a, + 0x27a31e8b, + 0x27a39e8c, + 0x27a3de8e, + 0x27a41e8f, + 0x7a49e90, + 0x7b25e92, + 0x27b31ec9, + 0x27b35ecc, + 0x27b39ecd, + 0x27b3dece, + 0x7b41ecf, + 0x7b6ded0, + 0x7b79edb, + 0x7b7dede, + 0x7ba1edf, + 0x7badee8, + 0x7bcdeeb, + 0x7bd1ef3, + 0x7c09ef4, + 0x7ebdf02, + 0x7f79faf, + 0x7f7dfde, + 0x7f81fdf, + 0x7f95fe0, + 0x7f99fe5, + 0x7fcdfe6, + 0x8005ff3, + 0x2800a001, + 0x8026002, + 0x804e009, + 0x8052013, + 0x8076014, + 0x809201d, + 0x80ba024, + 0x80ca02e, + 0x80ce032, + 0x80d2033, + 0x810e034, + 0x811a043, + 0x8142046, + 0x81de050, + 0x281e2077, + 0x81e6078, + 0x81f6079, + 0x281fa07d, + 0x820a07e, + 0x8226082, + 0x8246089, + 0x824a091, + 0x825e092, + 0x8272097, + 0x827609c, + 0x827a09d, + 0x827e09e, + 0x829e09f, + 0x834a0a7, + 0x834e0d2, + 0x836e0d3, + 0x839a0db, + 0x283aa0e6, + 0x83ae0ea, + 0x83be0eb, + 0x83f60ef, + 0x83fe0fd, + 0x84120ff, + 0x8432104, + 0x844e10c, + 0x845a113, + 0x8472116, + 0x84aa11c, + 0x84ae12a, + 0x858212b, + 0x8586160, + 0x859a161, + 0x85a2166, + 0x85ba168, + 0x85be16e, + 0x85ca16f, + 0x85d6172, + 0x85da175, + 0x85e2176, + 0x85e6178, + 0x860a179, + 0x864a182, + 0x864e192, + 0x866e193, + 0x86c219b, + 0x86f21b0, + 0x286f61bc, + 0x86fe1bd, + 0x87561bf, + 0x875a1d5, + 0x875e1d6, + 0x87621d7, + 0x87a61d8, + 0x87b61e9, + 0x87f61ed, + 0x87fa1fd, + 0x882a1fe, + 0x897620a, + 0x899e25d, + 0x89da267, + 0x8a02276, + 0x28a0a280, + 0x28a0e282, + 0x28a12283, + 0x8a1a284, + 0x8a26286, + 0x8b4a289, 0x8b562d2, 0x8b622d5, 0x8b6e2d8, - 0x8b722db, - 0x8b7e2dc, - 0x8b9a2df, - 0x8b9e2e6, - 0x8bae2e7, - 0x8bd22eb, - 0x8bd62f4, - 0x8c1a2f5, - 0x8c22306, - 0x8c36308, - 0x8c6a30d, - 0x8c8a31a, - 0x8c92322, - 0x8cb6324, - 0x8cce32d, - 0x8ce6333, - 0x8cfe339, - 0x8d1233f, - 0x28d5a344, - 0x8d5e356, - 0x8d8a357, - 0x8d9a362, - 0x8dae366, + 0x8b7a2db, + 0x8b862de, + 0x8b922e1, + 0x8b9e2e4, + 0x8baa2e7, + 0x8bb62ea, + 0x8bc22ed, + 0x28bc62f0, + 0x8bd22f1, + 0x8bde2f4, + 0x8bea2f7, + 0x8bf22fa, + 0x8bfe2fc, + 0x8c0a2ff, + 0x8c16302, + 0x8c22305, + 0x8c2e308, + 0x8c3a30b, + 0x8c4630e, + 0x8c52311, + 0x8c5e314, + 0x8c6a317, + 0x8c7631a, + 0x8ca231d, + 0x8cae328, + 0x8cba32b, + 0x8cc632e, + 0x8cd2331, + 0x8cde334, + 0x8ce6337, + 0x8cf2339, + 0x8cfe33c, + 0x8d0a33f, + 0x8d16342, + 0x8d22345, + 0x8d2e348, + 0x8d3a34b, + 0x8d4634e, + 0x8d52351, + 0x8d5e354, + 0x8d6a357, + 0x8d7235a, + 0x8d7e35c, + 0x8d8635f, + 0x8d92361, + 0x8d9e364, + 0x8daa367, + 0x8db636a, + 0x8dc236d, + 0x8dce370, + 0x8dda373, + 0x8de6376, + 0x8dea379, + 0x8df637a, + 0x8e1237d, + 0x8e16384, + 0x8e26385, + 0x8e4a389, + 0x8e4e392, + 0x8e92393, + 0x8e9a3a4, + 0x8eae3a6, + 0x8ee23ab, + 0x8f023b8, + 0x8f063c0, + 0x8f0e3c1, + 0x8f323c3, + 0x8f4a3cc, + 0x8f623d2, + 0x8f7a3d8, + 0x8f923de, + 0x28fda3e4, + 0x8fde3f6, + 0x900a3f7, + 0x901a402, + 0x902e406, } -// max children 601 (capacity 1023) -// max text offset 30901 (capacity 32767) +// max children 650 (capacity 1023) +// max text offset 31341 (capacity 32767) // max text length 36 (capacity 63) -// max hi 9067 (capacity 16383) -// max lo 9062 (capacity 16383) +// max hi 9227 (capacity 16383) +// max lo 9222 (capacity 16383) diff 
--git a/vendor/golang.org/x/oauth2/README.md b/vendor/golang.org/x/oauth2/README.md index 8cfd6063e72..1473e1296d0 100644 --- a/vendor/golang.org/x/oauth2/README.md +++ b/vendor/golang.org/x/oauth2/README.md @@ -1,7 +1,7 @@ # OAuth2 for Go +[![Go Reference](https://pkg.go.dev/badge/golang.org/x/oauth2.svg)](https://pkg.go.dev/golang.org/x/oauth2) [![Build Status](https://travis-ci.org/golang/oauth2.svg?branch=master)](https://travis-ci.org/golang/oauth2) -[![GoDoc](https://godoc.org/golang.org/x/oauth2?status.svg)](https://godoc.org/golang.org/x/oauth2) oauth2 package contains a client implementation for OAuth 2.0 spec. @@ -14,17 +14,17 @@ go get golang.org/x/oauth2 Or you can manually git clone the repository to `$(go env GOPATH)/src/golang.org/x/oauth2`. -See godoc for further documentation and examples. +See pkg.go.dev for further documentation and examples. -* [godoc.org/golang.org/x/oauth2](https://godoc.org/golang.org/x/oauth2) -* [godoc.org/golang.org/x/oauth2/google](https://godoc.org/golang.org/x/oauth2/google) +* [pkg.go.dev/golang.org/x/oauth2](https://pkg.go.dev/golang.org/x/oauth2) +* [pkg.go.dev/golang.org/x/oauth2/google](https://pkg.go.dev/golang.org/x/oauth2/google) ## Policy for new packages We no longer accept new provider-specific packages in this repo if all they do is add a single endpoint variable. If you just want to add a single endpoint, add it to the -[godoc.org/golang.org/x/oauth2/endpoints](https://godoc.org/golang.org/x/oauth2/endpoints) +[pkg.go.dev/golang.org/x/oauth2/endpoints](https://pkg.go.dev/golang.org/x/oauth2/endpoints) package. ## Report Issues / Send Patches diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index c0f9f2d523f..928fa7a9fb1 100644 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -561,6 +561,7 @@ ccflags="$@" $2 ~ /^(HDIO|WIN|SMART)_/ || $2 ~ /^CRYPTO_/ || $2 ~ /^TIPC_/ || + $2 !~ "DEVLINK_RELOAD_LIMITS_VALID_MASK" && $2 ~ /^DEVLINK_/ || $2 ~ /^LWTUNNEL_IP/ || $2 !~ "WMESGLEN" && diff --git a/vendor/golang.org/x/sys/unix/ptrace_darwin.go b/vendor/golang.org/x/sys/unix/ptrace_darwin.go new file mode 100644 index 00000000000..fc568b5403e --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ptrace_darwin.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build darwin,!ios + +package unix + +func ptrace(request int, pid int, addr uintptr, data uintptr) error { + return ptrace1(request, pid, addr, data) +} diff --git a/vendor/golang.org/x/sys/unix/ptrace_ios.go b/vendor/golang.org/x/sys/unix/ptrace_ios.go new file mode 100644 index 00000000000..183441c9a53 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/ptrace_ios.go @@ -0,0 +1,11 @@ +// Copyright 2020 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build ios + +package unix + +func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { + return ENOTSUP +} diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go index dc0befee37e..ee852f1abc5 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin.1_13.go @@ -26,7 +26,6 @@ func fdopendir(fd int) (dir uintptr, err error) { func libc_fdopendir_trampoline() -//go:linkname libc_fdopendir libc_fdopendir //go:cgo_import_dynamic libc_fdopendir fdopendir "/usr/lib/libSystem.B.dylib" func Getdirentries(fd int, buf []byte, basep *uintptr) (n int, err error) { diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go index 6c1f4ab95b4..ee065fcf2da 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_386.go @@ -45,6 +45,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 -//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +//sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go index 0582ae256ef..7a1f64a7b6b 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go @@ -45,6 +45,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys Fstatfs(fd int, stat *Statfs_t) (err error) = SYS_FSTATFS64 //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT64 //sys Lstat(path string, stat *Stat_t) (err error) = SYS_LSTAT64 -//sys ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +//sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) = SYS_STAT64 //sys Statfs(path string, stat *Statfs_t) (err error) = SYS_STATFS64 diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go index c6a9733b4cb..d30735c5d63 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go @@ -6,7 +6,7 @@ package unix import "syscall" -func ptrace(request int, pid int, addr uintptr, data uintptr) error { +func ptrace1(request int, pid int, addr uintptr, data uintptr) error { return ENOTSUP } diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go index 253afa4de55..9f85fd4046e 100644 --- a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go @@ -45,6 +45,6 @@ func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, //sys Fstatfs(fd int, stat *Statfs_t) (err error) //sys getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) = SYS_GETFSSTAT //sys Lstat(path string, stat *Stat_t) (err error) -//sys 
ptrace(request int, pid int, addr uintptr, data uintptr) (err error) +//sys ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) = SYS_ptrace //sys Stat(path string, stat *Stat_t) (err error) //sys Statfs(path string, stat *Statfs_t) (err error) diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index 84a9e5277ac..28be1306ec9 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -641,6 +641,36 @@ func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil } +// SockaddrCANJ1939 implements the Sockaddr interface for AF_CAN using J1939 +// protocol (https://en.wikipedia.org/wiki/SAE_J1939). For more information +// on the purposes of the fields, check the official linux kernel documentation +// available here: https://www.kernel.org/doc/Documentation/networking/j1939.rst +type SockaddrCANJ1939 struct { + Ifindex int + Name uint64 + PGN uint32 + Addr uint8 + raw RawSockaddrCAN +} + +func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) { + if sa.Ifindex < 0 || sa.Ifindex > 0x7fffffff { + return nil, 0, EINVAL + } + sa.raw.Family = AF_CAN + sa.raw.Ifindex = int32(sa.Ifindex) + n := (*[8]byte)(unsafe.Pointer(&sa.Name)) + for i := 0; i < 8; i++ { + sa.raw.Addr[i] = n[i] + } + p := (*[4]byte)(unsafe.Pointer(&sa.PGN)) + for i := 0; i < 4; i++ { + sa.raw.Addr[i+8] = p[i] + } + sa.raw.Addr[12] = sa.Addr + return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil +} + // SockaddrALG implements the Sockaddr interface for AF_ALG type sockets. // SockaddrALG enables userspace access to the Linux kernel's cryptography // subsystem. The Type and Name fields specify which type of hash or cipher @@ -952,6 +982,10 @@ func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) { return unsafe.Pointer(&sa.raw), SizeofSockaddrIUCV, nil } +var socketProtocol = func(fd int) (int, error) { + return GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL) +} + func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { switch rsa.Addr.Family { case AF_NETLINK: @@ -1002,7 +1036,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { return sa, nil case AF_INET: - proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL) + proto, err := socketProtocol(fd) if err != nil { return nil, err } @@ -1028,7 +1062,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { } case AF_INET6: - proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL) + proto, err := socketProtocol(fd) if err != nil { return nil, err } @@ -1063,7 +1097,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { } return sa, nil case AF_BLUETOOTH: - proto, err := GetsockoptInt(fd, SOL_SOCKET, SO_PROTOCOL) + proto, err := socketProtocol(fd) if err != nil { return nil, err } @@ -1150,20 +1184,43 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) { return sa, nil case AF_CAN: - pp := (*RawSockaddrCAN)(unsafe.Pointer(rsa)) - sa := &SockaddrCAN{ - Ifindex: int(pp.Ifindex), - } - rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) - for i := 0; i < 4; i++ { - rx[i] = pp.Addr[i] - } - tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) - for i := 0; i < 4; i++ { - tx[i] = pp.Addr[i+4] + proto, err := socketProtocol(fd) + if err != nil { + return nil, err } - return sa, nil + pp := (*RawSockaddrCAN)(unsafe.Pointer(rsa)) + + switch proto { + case CAN_J1939: + sa := &SockaddrCANJ1939{ + Ifindex: 
int(pp.Ifindex), + } + name := (*[8]byte)(unsafe.Pointer(&sa.Name)) + for i := 0; i < 8; i++ { + name[i] = pp.Addr[i] + } + pgn := (*[4]byte)(unsafe.Pointer(&sa.PGN)) + for i := 0; i < 4; i++ { + pgn[i] = pp.Addr[i+8] + } + addr := (*[1]byte)(unsafe.Pointer(&sa.Addr)) + addr[0] = pp.Addr[12] + return sa, nil + default: + sa := &SockaddrCAN{ + Ifindex: int(pp.Ifindex), + } + rx := (*[4]byte)(unsafe.Pointer(&sa.RxID)) + for i := 0; i < 4; i++ { + rx[i] = pp.Addr[i] + } + tx := (*[4]byte)(unsafe.Pointer(&sa.TxID)) + for i := 0; i < 4; i++ { + tx[i] = pp.Addr[i+4] + } + return sa, nil + } } return nil, EAFNOSUPPORT } diff --git a/vendor/golang.org/x/sys/unix/timestruct.go b/vendor/golang.org/x/sys/unix/timestruct.go index 4a672f56942..103604299e2 100644 --- a/vendor/golang.org/x/sys/unix/timestruct.go +++ b/vendor/golang.org/x/sys/unix/timestruct.go @@ -8,12 +8,10 @@ package unix import "time" -// TimespecToNsec converts a Timespec value into a number of -// nanoseconds since the Unix epoch. -func TimespecToNsec(ts Timespec) int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } +// TimespecToNsec returns the time stored in ts as nanoseconds. +func TimespecToNsec(ts Timespec) int64 { return ts.Nano() } -// NsecToTimespec takes a number of nanoseconds since the Unix epoch -// and returns the corresponding Timespec value. +// NsecToTimespec converts a number of nanoseconds into a Timespec. func NsecToTimespec(nsec int64) Timespec { sec := nsec / 1e9 nsec = nsec % 1e9 @@ -42,12 +40,10 @@ func TimeToTimespec(t time.Time) (Timespec, error) { return ts, nil } -// TimevalToNsec converts a Timeval value into a number of nanoseconds -// since the Unix epoch. -func TimevalToNsec(tv Timeval) int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1e3 } +// TimevalToNsec returns the time stored in tv as nanoseconds. +func TimevalToNsec(tv Timeval) int64 { return tv.Nano() } -// NsecToTimeval takes a number of nanoseconds since the Unix epoch -// and returns the corresponding Timeval value. +// NsecToTimeval converts a number of nanoseconds into a Timeval. func NsecToTimeval(nsec int64) Timeval { nsec += 999 // round up to microsecond usec := nsec % 1e9 / 1e3 @@ -59,24 +55,22 @@ func NsecToTimeval(nsec int64) Timeval { return setTimeval(sec, usec) } -// Unix returns ts as the number of seconds and nanoseconds elapsed since the -// Unix epoch. +// Unix returns the time stored in ts as seconds plus nanoseconds. func (ts *Timespec) Unix() (sec int64, nsec int64) { return int64(ts.Sec), int64(ts.Nsec) } -// Unix returns tv as the number of seconds and nanoseconds elapsed since the -// Unix epoch. +// Unix returns the time stored in tv as seconds plus nanoseconds. func (tv *Timeval) Unix() (sec int64, nsec int64) { return int64(tv.Sec), int64(tv.Usec) * 1000 } -// Nano returns ts as the number of nanoseconds elapsed since the Unix epoch. +// Nano returns the time stored in ts as nanoseconds. func (ts *Timespec) Nano() int64 { return int64(ts.Sec)*1e9 + int64(ts.Nsec) } -// Nano returns tv as the number of nanoseconds elapsed since the Unix epoch. +// Nano returns the time stored in tv as nanoseconds.
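
The SockaddrCANJ1939 support added to syscall_linux.go above packs the 64-bit Name into Addr[0:8], the 32-bit PGN into Addr[8:12], and the 8-bit station address into Addr[12]; anyToSockaddr now reverses that layout whenever the socket's protocol is CAN_J1939. A minimal bind sketch against the new type — "can0" is a placeholder interface name, and the J1939_NO_* wildcard constants are assumed to be present in the vendored zerrors_linux.go:

package main

import (
	"log"
	"net"

	"golang.org/x/sys/unix"
)

func main() {
	// A J1939 socket is a datagram socket on the CAN protocol family.
	fd, err := unix.Socket(unix.AF_CAN, unix.SOCK_DGRAM, unix.CAN_J1939)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	ifi, err := net.InterfaceByName("can0") // placeholder interface
	if err != nil {
		log.Fatal(err)
	}

	// Bind with the wildcard NAME/PGN/address; the fields map onto the
	// raw sockaddr exactly as the new sockaddr() method packs them.
	sa := &unix.SockaddrCANJ1939{
		Ifindex: ifi.Index,
		Name:    unix.J1939_NO_NAME,
		PGN:     unix.J1939_NO_PGN,
		Addr:    unix.J1939_NO_ADDR,
	}
	if err := unix.Bind(fd, sa); err != nil {
		log.Fatal(err)
	}
}
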
func (tv *Timeval) Nano() int64 { return int64(tv.Sec)*1e9 + int64(tv.Usec)*1000 } diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go index b46110354df..f73b4efd07b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go @@ -65,6 +65,7 @@ const ( ALG_OP_ENCRYPT = 0x1 ALG_SET_AEAD_ASSOCLEN = 0x4 ALG_SET_AEAD_AUTHSIZE = 0x5 + ALG_SET_DRBG_ENTROPY = 0x6 ALG_SET_IV = 0x2 ALG_SET_KEY = 0x1 ALG_SET_OP = 0x3 @@ -179,8 +180,10 @@ const ( BPF_F_ANY_ALIGNMENT = 0x2 BPF_F_QUERY_EFFECTIVE = 0x1 BPF_F_REPLACE = 0x4 + BPF_F_SLEEPABLE = 0x10 BPF_F_STRICT_ALIGNMENT = 0x1 BPF_F_TEST_RND_HI32 = 0x4 + BPF_F_TEST_RUN_ON_CPU = 0x1 BPF_F_TEST_STATE_FREQ = 0x8 BPF_H = 0x8 BPF_IMM = 0x0 @@ -219,6 +222,7 @@ const ( BPF_NET_OFF = -0x100000 BPF_OBJ_NAME_LEN = 0x10 BPF_OR = 0x40 + BPF_PSEUDO_BTF_ID = 0x3 BPF_PSEUDO_CALL = 0x1 BPF_PSEUDO_MAP_FD = 0x1 BPF_PSEUDO_MAP_VALUE = 0x2 @@ -429,10 +433,13 @@ const ( DEBUGFS_MAGIC = 0x64626720 DEVLINK_CMD_ESWITCH_MODE_GET = 0x1d DEVLINK_CMD_ESWITCH_MODE_SET = 0x1e + DEVLINK_FLASH_OVERWRITE_IDENTIFIERS = 0x2 + DEVLINK_FLASH_OVERWRITE_SETTINGS = 0x1 DEVLINK_GENL_MCGRP_CONFIG_NAME = "config" DEVLINK_GENL_NAME = "devlink" DEVLINK_GENL_VERSION = 0x1 DEVLINK_SB_THRESHOLD_TO_ALPHA_MAX = 0x14 + DEVLINK_SUPPORTED_FLASH_OVERWRITE_SECTIONS = 0x3 DEVMEM_MAGIC = 0x454d444d DEVPTS_SUPER_MAGIC = 0x1cd1 DMA_BUF_MAGIC = 0x444d4142 @@ -477,9 +484,9 @@ const ( DM_UUID_FLAG = 0x4000 DM_UUID_LEN = 0x81 DM_VERSION = 0xc138fd00 - DM_VERSION_EXTRA = "-ioctl (2020-02-27)" + DM_VERSION_EXTRA = "-ioctl (2020-10-01)" DM_VERSION_MAJOR = 0x4 - DM_VERSION_MINOR = 0x2a + DM_VERSION_MINOR = 0x2b DM_VERSION_PATCHLEVEL = 0x0 DT_BLK = 0x6 DT_CHR = 0x2 @@ -1331,6 +1338,7 @@ const ( MS_NOREMOTELOCK = 0x8000000 MS_NOSEC = 0x10000000 MS_NOSUID = 0x2 + MS_NOSYMFOLLOW = 0x100 MS_NOUSER = -0x80000000 MS_POSIXACL = 0x10000 MS_PRIVATE = 0x40000 @@ -1572,7 +1580,7 @@ const ( PERF_MEM_REMOTE_REMOTE = 0x1 PERF_MEM_REMOTE_SHIFT = 0x25 PERF_MEM_SNOOPX_FWD = 0x1 - PERF_MEM_SNOOPX_SHIFT = 0x25 + PERF_MEM_SNOOPX_SHIFT = 0x26 PERF_MEM_SNOOP_HIT = 0x4 PERF_MEM_SNOOP_HITM = 0x10 PERF_MEM_SNOOP_MISS = 0x8 @@ -1672,6 +1680,13 @@ const ( PR_MCE_KILL_SET = 0x1 PR_MPX_DISABLE_MANAGEMENT = 0x2c PR_MPX_ENABLE_MANAGEMENT = 0x2b + PR_MTE_TAG_MASK = 0x7fff8 + PR_MTE_TAG_SHIFT = 0x3 + PR_MTE_TCF_ASYNC = 0x4 + PR_MTE_TCF_MASK = 0x6 + PR_MTE_TCF_NONE = 0x0 + PR_MTE_TCF_SHIFT = 0x1 + PR_MTE_TCF_SYNC = 0x2 PR_PAC_APDAKEY = 0x4 PR_PAC_APDBKEY = 0x8 PR_PAC_APGAKEY = 0x10 @@ -2206,7 +2221,7 @@ const ( STATX_ATTR_APPEND = 0x20 STATX_ATTR_AUTOMOUNT = 0x1000 STATX_ATTR_COMPRESSED = 0x4 - STATX_ATTR_DAX = 0x2000 + STATX_ATTR_DAX = 0x200000 STATX_ATTR_ENCRYPTED = 0x800 STATX_ATTR_IMMUTABLE = 0x10 STATX_ATTR_MOUNT_ROOT = 0x2000 @@ -2390,6 +2405,7 @@ const ( TIPC_NODE_STATE = 0x0 TIPC_OK = 0x0 TIPC_PUBLISHED = 0x1 + TIPC_REKEYING_NOW = 0xffffffff TIPC_RESERVED_TYPES = 0x40 TIPC_RETDATA = 0x2 TIPC_SERVICE_ADDR = 0x2 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index dd282c08b7f..336e0b326a9 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -4,7 +4,7 @@ // +build 386,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
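
Among the zerrors_linux.go constant changes above, the STATX_ATTR_DAX correction (0x2000 to 0x200000) is the one most likely to matter to callers: the old value collided with STATX_ATTR_MOUNT_ROOT, so a DAX test could silently read the wrong bit. A short sketch of such a check, assuming a Linux host; the path is hypothetical:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var stx unix.Statx_t
	// "/mnt/pmem0/data" stands in for a file on a DAX-capable filesystem.
	if err := unix.Statx(unix.AT_FDCWD, "/mnt/pmem0/data", 0, unix.STATX_ALL, &stx); err != nil {
		log.Fatal(err)
	}
	// A bit in Attributes is only meaningful if the filesystem reports
	// support for it in Attributes_mask.
	if stx.Attributes_mask&unix.STATX_ATTR_DAX != 0 {
		fmt.Println("DAX enabled:", stx.Attributes&unix.STATX_ATTR_DAX != 0)
	}
}
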
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/_const.go package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 82fc93c7bbc..961507e937d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -4,7 +4,7 @@ // +build amd64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/_const.go package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index fe7094f2763..a65576db7b6 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -4,7 +4,7 @@ // +build arm,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 3b6cc58803b..cf075caa8c8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -4,7 +4,7 @@ // +build arm64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/_const.go package unix @@ -196,6 +196,8 @@ const ( PPPIOCXFERUNIT = 0x744e PROT_BTI = 0x10 PR_SET_PTRACER_ANY = 0xffffffffffffffff + PTRACE_PEEKMTETAGS = 0x21 + PTRACE_POKEMTETAGS = 0x22 PTRACE_SYSEMU = 0x1f PTRACE_SYSEMU_SINGLESTEP = 0x20 RLIMIT_AS = 0x9 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index ce3d9ae1561..efe90deeab8 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -4,7 +4,7 @@ // +build mips,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 7a85215ce52..8b0e8911dc3 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -4,7 +4,7 @@ // +build mips64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index 07d4cc1bd5f..e9430cd1a22 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -4,7 +4,7 @@ // +build mips64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index d4842ba1c2a..61e4f5db67c 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -4,7 +4,7 @@ // +build mipsle,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index 941e20dacec..973ad934633 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -4,7 +4,7 @@ // +build ppc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index 63d3bc56627..70a7406ba11 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -4,7 +4,7 @@ // +build ppc64le,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go index 490bee1ab1b..b1bf7997cbd 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go @@ -4,7 +4,7 @@ // +build riscv64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index 467b8218e80..7053d10ba02 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -4,7 +4,7 @@ // +build s390x,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/_const.go package unix diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go index 79fbafbcf6c..137cfe79626 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go @@ -4,7 +4,7 @@ // +build sparc64,linux // Code generated by cmd/cgo -godefs; DO NOT EDIT. 
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include _const.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go package unix diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go index e263fbdb8bf..c8c142c59a0 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.1_13.go @@ -24,7 +24,6 @@ func closedir(dir uintptr) (err error) { func libc_closedir_trampoline() -//go:linkname libc_closedir libc_closedir //go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -37,5 +36,4 @@ func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { func libc_readdir_r_trampoline() -//go:linkname libc_readdir_r libc_readdir_r //go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go index 6eb45798323..36ab7eab868 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go @@ -25,7 +25,6 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { func libc_getgroups_trampoline() -//go:linkname libc_getgroups libc_getgroups //go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -40,7 +39,6 @@ func setgroups(ngid int, gid *_Gid_t) (err error) { func libc_setgroups_trampoline() -//go:linkname libc_setgroups libc_setgroups //go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -56,7 +54,6 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err func libc_wait4_trampoline() -//go:linkname libc_wait4 libc_wait4 //go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -72,7 +69,6 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { func libc_accept_trampoline() -//go:linkname libc_accept libc_accept //go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -87,7 +83,6 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { func libc_bind_trampoline() -//go:linkname libc_bind libc_bind //go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -102,7 +97,6 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { func libc_connect_trampoline() -//go:linkname libc_connect libc_connect //go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -118,7 +112,6 @@ func socket(domain int, typ int, proto int) (fd int, err error) { func libc_socket_trampoline() -//go:linkname libc_socket libc_socket //go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -133,7 +126,6 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen func libc_getsockopt_trampoline() -//go:linkname libc_getsockopt libc_getsockopt //go:cgo_import_dynamic libc_getsockopt getsockopt 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -148,7 +140,6 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) func libc_setsockopt_trampoline() -//go:linkname libc_setsockopt libc_setsockopt //go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -163,7 +154,6 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func libc_getpeername_trampoline() -//go:linkname libc_getpeername libc_getpeername //go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -178,7 +168,6 @@ func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func libc_getsockname_trampoline() -//go:linkname libc_getsockname libc_getsockname //go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -193,7 +182,6 @@ func Shutdown(s int, how int) (err error) { func libc_shutdown_trampoline() -//go:linkname libc_shutdown libc_shutdown //go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -208,7 +196,6 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { func libc_socketpair_trampoline() -//go:linkname libc_socketpair libc_socketpair //go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -230,7 +217,6 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl func libc_recvfrom_trampoline() -//go:linkname libc_recvfrom libc_recvfrom //go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -251,7 +237,6 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( func libc_sendto_trampoline() -//go:linkname libc_sendto libc_sendto //go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -267,7 +252,6 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { func libc_recvmsg_trampoline() -//go:linkname libc_recvmsg libc_recvmsg //go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -283,7 +267,6 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { func libc_sendmsg_trampoline() -//go:linkname libc_sendmsg libc_sendmsg //go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -299,7 +282,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne func libc_kevent_trampoline() -//go:linkname libc_kevent libc_kevent //go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -319,7 +301,6 @@ func utimes(path string, timeval *[2]Timeval) (err error) { func libc_utimes_trampoline() -//go:linkname libc_utimes libc_utimes //go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -334,7 +315,6 @@ func futimes(fd int, timeval *[2]Timeval) 
(err error) { func libc_futimes_trampoline() -//go:linkname libc_futimes libc_futimes //go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -350,7 +330,6 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { func libc_poll_trampoline() -//go:linkname libc_poll libc_poll //go:cgo_import_dynamic libc_poll poll "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -371,7 +350,6 @@ func Madvise(b []byte, behav int) (err error) { func libc_madvise_trampoline() -//go:linkname libc_madvise libc_madvise //go:cgo_import_dynamic libc_madvise madvise "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -392,7 +370,6 @@ func Mlock(b []byte) (err error) { func libc_mlock_trampoline() -//go:linkname libc_mlock libc_mlock //go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -407,7 +384,6 @@ func Mlockall(flags int) (err error) { func libc_mlockall_trampoline() -//go:linkname libc_mlockall libc_mlockall //go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -428,7 +404,6 @@ func Mprotect(b []byte, prot int) (err error) { func libc_mprotect_trampoline() -//go:linkname libc_mprotect libc_mprotect //go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -449,7 +424,6 @@ func Msync(b []byte, flags int) (err error) { func libc_msync_trampoline() -//go:linkname libc_msync libc_msync //go:cgo_import_dynamic libc_msync msync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -470,7 +444,6 @@ func Munlock(b []byte) (err error) { func libc_munlock_trampoline() -//go:linkname libc_munlock libc_munlock //go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -485,7 +458,6 @@ func Munlockall() (err error) { func libc_munlockall_trampoline() -//go:linkname libc_munlockall libc_munlockall //go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -502,7 +474,6 @@ func pipe() (r int, w int, err error) { func libc_pipe_trampoline() -//go:linkname libc_pipe libc_pipe //go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -528,7 +499,6 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o func libc_getxattr_trampoline() -//go:linkname libc_getxattr libc_getxattr //go:cgo_import_dynamic libc_getxattr getxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -549,7 +519,6 @@ func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, optio func libc_fgetxattr_trampoline() -//go:linkname libc_fgetxattr libc_fgetxattr //go:cgo_import_dynamic libc_fgetxattr fgetxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -574,7 +543,6 @@ func setxattr(path string, attr string, data *byte, size int, position uint32, o func libc_setxattr_trampoline() -//go:linkname libc_setxattr libc_setxattr //go:cgo_import_dynamic libc_setxattr setxattr 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -594,7 +562,6 @@ func fsetxattr(fd int, attr string, data *byte, size int, position uint32, optio func libc_fsetxattr_trampoline() -//go:linkname libc_fsetxattr libc_fsetxattr //go:cgo_import_dynamic libc_fsetxattr fsetxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -619,7 +586,6 @@ func removexattr(path string, attr string, options int) (err error) { func libc_removexattr_trampoline() -//go:linkname libc_removexattr libc_removexattr //go:cgo_import_dynamic libc_removexattr removexattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -639,7 +605,6 @@ func fremovexattr(fd int, attr string, options int) (err error) { func libc_fremovexattr_trampoline() -//go:linkname libc_fremovexattr libc_fremovexattr //go:cgo_import_dynamic libc_fremovexattr fremovexattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -660,7 +625,6 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro func libc_listxattr_trampoline() -//go:linkname libc_listxattr libc_listxattr //go:cgo_import_dynamic libc_listxattr listxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -676,7 +640,6 @@ func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { func libc_flistxattr_trampoline() -//go:linkname libc_flistxattr libc_flistxattr //go:cgo_import_dynamic libc_flistxattr flistxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -691,7 +654,6 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp func libc_setattrlist_trampoline() -//go:linkname libc_setattrlist libc_setattrlist //go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -707,7 +669,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { func libc_fcntl_trampoline() -//go:linkname libc_fcntl libc_fcntl //go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -722,7 +683,6 @@ func kill(pid int, signum int, posix int) (err error) { func libc_kill_trampoline() -//go:linkname libc_kill libc_kill //go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -737,7 +697,6 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { func libc_ioctl_trampoline() -//go:linkname libc_ioctl libc_ioctl //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -758,7 +717,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) func libc_sysctl_trampoline() -//go:linkname libc_sysctl libc_sysctl //go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -773,7 +731,6 @@ func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer func libc_sendfile_trampoline() -//go:linkname libc_sendfile libc_sendfile //go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -793,7 +750,6 @@ func Access(path string, mode uint32) 
(err error) { func libc_access_trampoline() -//go:linkname libc_access libc_access //go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -808,7 +764,6 @@ func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { func libc_adjtime_trampoline() -//go:linkname libc_adjtime libc_adjtime //go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -828,7 +783,6 @@ func Chdir(path string) (err error) { func libc_chdir_trampoline() -//go:linkname libc_chdir libc_chdir //go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -848,7 +802,6 @@ func Chflags(path string, flags int) (err error) { func libc_chflags_trampoline() -//go:linkname libc_chflags libc_chflags //go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -868,7 +821,6 @@ func Chmod(path string, mode uint32) (err error) { func libc_chmod_trampoline() -//go:linkname libc_chmod libc_chmod //go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -888,7 +840,6 @@ func Chown(path string, uid int, gid int) (err error) { func libc_chown_trampoline() -//go:linkname libc_chown libc_chown //go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -908,7 +859,6 @@ func Chroot(path string) (err error) { func libc_chroot_trampoline() -//go:linkname libc_chroot libc_chroot //go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -923,7 +873,6 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { func libc_clock_gettime_trampoline() -//go:linkname libc_clock_gettime libc_clock_gettime //go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -938,7 +887,6 @@ func Close(fd int) (err error) { func libc_close_trampoline() -//go:linkname libc_close libc_close //go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -963,7 +911,6 @@ func Clonefile(src string, dst string, flags int) (err error) { func libc_clonefile_trampoline() -//go:linkname libc_clonefile libc_clonefile //go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -988,7 +935,6 @@ func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) func libc_clonefileat_trampoline() -//go:linkname libc_clonefileat libc_clonefileat //go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1004,7 +950,6 @@ func Dup(fd int) (nfd int, err error) { func libc_dup_trampoline() -//go:linkname libc_dup libc_dup //go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1019,7 +964,6 @@ func Dup2(from int, to int) (err error) { func libc_dup2_trampoline() -//go:linkname libc_dup2 libc_dup2 //go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED 
BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1044,7 +988,6 @@ func Exchangedata(path1 string, path2 string, options int) (err error) { func libc_exchangedata_trampoline() -//go:linkname libc_exchangedata libc_exchangedata //go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1056,7 +999,6 @@ func Exit(code int) { func libc_exit_trampoline() -//go:linkname libc_exit libc_exit //go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1076,7 +1018,6 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { func libc_faccessat_trampoline() -//go:linkname libc_faccessat libc_faccessat //go:cgo_import_dynamic libc_faccessat faccessat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1091,7 +1032,6 @@ func Fchdir(fd int) (err error) { func libc_fchdir_trampoline() -//go:linkname libc_fchdir libc_fchdir //go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1106,7 +1046,6 @@ func Fchflags(fd int, flags int) (err error) { func libc_fchflags_trampoline() -//go:linkname libc_fchflags libc_fchflags //go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1121,7 +1060,6 @@ func Fchmod(fd int, mode uint32) (err error) { func libc_fchmod_trampoline() -//go:linkname libc_fchmod libc_fchmod //go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1141,7 +1079,6 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { func libc_fchmodat_trampoline() -//go:linkname libc_fchmodat libc_fchmodat //go:cgo_import_dynamic libc_fchmodat fchmodat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1156,7 +1093,6 @@ func Fchown(fd int, uid int, gid int) (err error) { func libc_fchown_trampoline() -//go:linkname libc_fchown libc_fchown //go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1176,7 +1112,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { func libc_fchownat_trampoline() -//go:linkname libc_fchownat libc_fchownat //go:cgo_import_dynamic libc_fchownat fchownat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1196,7 +1131,6 @@ func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) func libc_fclonefileat_trampoline() -//go:linkname libc_fclonefileat libc_fclonefileat //go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1211,7 +1145,6 @@ func Flock(fd int, how int) (err error) { func libc_flock_trampoline() -//go:linkname libc_flock libc_flock //go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1227,7 +1160,6 @@ func Fpathconf(fd int, name int) (val int, err error) { func libc_fpathconf_trampoline() -//go:linkname libc_fpathconf libc_fpathconf //go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT @@ -1242,7 +1174,6 @@ func Fsync(fd int) (err error) { func libc_fsync_trampoline() -//go:linkname libc_fsync libc_fsync //go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1257,7 +1188,6 @@ func Ftruncate(fd int, length int64) (err error) { func libc_ftruncate_trampoline() -//go:linkname libc_ftruncate libc_ftruncate //go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1279,7 +1209,6 @@ func Getcwd(buf []byte) (n int, err error) { func libc_getcwd_trampoline() -//go:linkname libc_getcwd libc_getcwd //go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1292,7 +1221,6 @@ func Getdtablesize() (size int) { func libc_getdtablesize_trampoline() -//go:linkname libc_getdtablesize libc_getdtablesize //go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1305,7 +1233,6 @@ func Getegid() (egid int) { func libc_getegid_trampoline() -//go:linkname libc_getegid libc_getegid //go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1318,7 +1245,6 @@ func Geteuid() (uid int) { func libc_geteuid_trampoline() -//go:linkname libc_geteuid libc_geteuid //go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1331,7 +1257,6 @@ func Getgid() (gid int) { func libc_getgid_trampoline() -//go:linkname libc_getgid libc_getgid //go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1347,7 +1272,6 @@ func Getpgid(pid int) (pgid int, err error) { func libc_getpgid_trampoline() -//go:linkname libc_getpgid libc_getpgid //go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1360,7 +1284,6 @@ func Getpgrp() (pgrp int) { func libc_getpgrp_trampoline() -//go:linkname libc_getpgrp libc_getpgrp //go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1373,7 +1296,6 @@ func Getpid() (pid int) { func libc_getpid_trampoline() -//go:linkname libc_getpid libc_getpid //go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1386,7 +1308,6 @@ func Getppid() (ppid int) { func libc_getppid_trampoline() -//go:linkname libc_getppid libc_getppid //go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1402,7 +1323,6 @@ func Getpriority(which int, who int) (prio int, err error) { func libc_getpriority_trampoline() -//go:linkname libc_getpriority libc_getpriority //go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1417,7 +1337,6 @@ func Getrlimit(which int, lim *Rlimit) (err error) { func libc_getrlimit_trampoline() -//go:linkname libc_getrlimit libc_getrlimit //go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE 
TOP; DO NOT EDIT @@ -1432,7 +1351,6 @@ func Getrusage(who int, rusage *Rusage) (err error) { func libc_getrusage_trampoline() -//go:linkname libc_getrusage libc_getrusage //go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1448,7 +1366,6 @@ func Getsid(pid int) (sid int, err error) { func libc_getsid_trampoline() -//go:linkname libc_getsid libc_getsid //go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1463,7 +1380,6 @@ func Gettimeofday(tp *Timeval) (err error) { func libc_gettimeofday_trampoline() -//go:linkname libc_gettimeofday libc_gettimeofday //go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1476,7 +1392,6 @@ func Getuid() (uid int) { func libc_getuid_trampoline() -//go:linkname libc_getuid libc_getuid //go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1489,7 +1404,6 @@ func Issetugid() (tainted bool) { func libc_issetugid_trampoline() -//go:linkname libc_issetugid libc_issetugid //go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1505,7 +1419,6 @@ func Kqueue() (fd int, err error) { func libc_kqueue_trampoline() -//go:linkname libc_kqueue libc_kqueue //go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1525,7 +1438,6 @@ func Lchown(path string, uid int, gid int) (err error) { func libc_lchown_trampoline() -//go:linkname libc_lchown libc_lchown //go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1550,7 +1462,6 @@ func Link(path string, link string) (err error) { func libc_link_trampoline() -//go:linkname libc_link libc_link //go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1575,7 +1486,6 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er func libc_linkat_trampoline() -//go:linkname libc_linkat libc_linkat //go:cgo_import_dynamic libc_linkat linkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1590,7 +1500,6 @@ func Listen(s int, backlog int) (err error) { func libc_listen_trampoline() -//go:linkname libc_listen libc_listen //go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1610,7 +1519,6 @@ func Mkdir(path string, mode uint32) (err error) { func libc_mkdir_trampoline() -//go:linkname libc_mkdir libc_mkdir //go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1630,7 +1538,6 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { func libc_mkdirat_trampoline() -//go:linkname libc_mkdirat libc_mkdirat //go:cgo_import_dynamic libc_mkdirat mkdirat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1650,7 +1557,6 @@ func Mkfifo(path string, mode uint32) (err error) { func libc_mkfifo_trampoline() -//go:linkname libc_mkfifo libc_mkfifo 
//go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1670,7 +1576,6 @@ func Mknod(path string, mode uint32, dev int) (err error) { func libc_mknod_trampoline() -//go:linkname libc_mknod libc_mknod //go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1691,7 +1596,6 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { func libc_open_trampoline() -//go:linkname libc_open libc_open //go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1712,7 +1616,6 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { func libc_openat_trampoline() -//go:linkname libc_openat libc_openat //go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1733,7 +1636,6 @@ func Pathconf(path string, name int) (val int, err error) { func libc_pathconf_trampoline() -//go:linkname libc_pathconf libc_pathconf //go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1755,7 +1657,6 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { func libc_pread_trampoline() -//go:linkname libc_pread libc_pread //go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1777,7 +1678,6 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { func libc_pwrite_trampoline() -//go:linkname libc_pwrite libc_pwrite //go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1799,7 +1699,6 @@ func read(fd int, p []byte) (n int, err error) { func libc_read_trampoline() -//go:linkname libc_read libc_read //go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1826,7 +1725,6 @@ func Readlink(path string, buf []byte) (n int, err error) { func libc_readlink_trampoline() -//go:linkname libc_readlink libc_readlink //go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1853,7 +1751,6 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { func libc_readlinkat_trampoline() -//go:linkname libc_readlinkat libc_readlinkat //go:cgo_import_dynamic libc_readlinkat readlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1878,7 +1775,6 @@ func Rename(from string, to string) (err error) { func libc_rename_trampoline() -//go:linkname libc_rename libc_rename //go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1903,7 +1799,6 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { func libc_renameat_trampoline() -//go:linkname libc_renameat libc_renameat //go:cgo_import_dynamic libc_renameat renameat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1923,7 +1818,6 @@ func Revoke(path string) (err error) { func libc_revoke_trampoline() -//go:linkname libc_revoke libc_revoke //go:cgo_import_dynamic libc_revoke revoke 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1943,7 +1837,6 @@ func Rmdir(path string) (err error) { func libc_rmdir_trampoline() -//go:linkname libc_rmdir libc_rmdir //go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1959,7 +1852,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { func libc_lseek_trampoline() -//go:linkname libc_lseek libc_lseek //go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1975,7 +1867,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err func libc_select_trampoline() -//go:linkname libc_select libc_select //go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1990,7 +1881,6 @@ func Setegid(egid int) (err error) { func libc_setegid_trampoline() -//go:linkname libc_setegid libc_setegid //go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2005,7 +1895,6 @@ func Seteuid(euid int) (err error) { func libc_seteuid_trampoline() -//go:linkname libc_seteuid libc_seteuid //go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2020,7 +1909,6 @@ func Setgid(gid int) (err error) { func libc_setgid_trampoline() -//go:linkname libc_setgid libc_setgid //go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2040,7 +1928,6 @@ func Setlogin(name string) (err error) { func libc_setlogin_trampoline() -//go:linkname libc_setlogin libc_setlogin //go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2055,7 +1942,6 @@ func Setpgid(pid int, pgid int) (err error) { func libc_setpgid_trampoline() -//go:linkname libc_setpgid libc_setpgid //go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2070,7 +1956,6 @@ func Setpriority(which int, who int, prio int) (err error) { func libc_setpriority_trampoline() -//go:linkname libc_setpriority libc_setpriority //go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2085,7 +1970,6 @@ func Setprivexec(flag int) (err error) { func libc_setprivexec_trampoline() -//go:linkname libc_setprivexec libc_setprivexec //go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2100,7 +1984,6 @@ func Setregid(rgid int, egid int) (err error) { func libc_setregid_trampoline() -//go:linkname libc_setregid libc_setregid //go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2115,7 +1998,6 @@ func Setreuid(ruid int, euid int) (err error) { func libc_setreuid_trampoline() -//go:linkname libc_setreuid libc_setreuid //go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2130,7 +2012,6 @@ func Setrlimit(which int, lim 
*Rlimit) (err error) { func libc_setrlimit_trampoline() -//go:linkname libc_setrlimit libc_setrlimit //go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2146,7 +2027,6 @@ func Setsid() (pid int, err error) { func libc_setsid_trampoline() -//go:linkname libc_setsid libc_setsid //go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2161,7 +2041,6 @@ func Settimeofday(tp *Timeval) (err error) { func libc_settimeofday_trampoline() -//go:linkname libc_settimeofday libc_settimeofday //go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2176,7 +2055,6 @@ func Setuid(uid int) (err error) { func libc_setuid_trampoline() -//go:linkname libc_setuid libc_setuid //go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2201,7 +2079,6 @@ func Symlink(path string, link string) (err error) { func libc_symlink_trampoline() -//go:linkname libc_symlink libc_symlink //go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2226,7 +2103,6 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { func libc_symlinkat_trampoline() -//go:linkname libc_symlinkat libc_symlinkat //go:cgo_import_dynamic libc_symlinkat symlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2241,7 +2117,6 @@ func Sync() (err error) { func libc_sync_trampoline() -//go:linkname libc_sync libc_sync //go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2261,7 +2136,6 @@ func Truncate(path string, length int64) (err error) { func libc_truncate_trampoline() -//go:linkname libc_truncate libc_truncate //go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2274,7 +2148,6 @@ func Umask(newmask int) (oldmask int) { func libc_umask_trampoline() -//go:linkname libc_umask libc_umask //go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2294,7 +2167,6 @@ func Undelete(path string) (err error) { func libc_undelete_trampoline() -//go:linkname libc_undelete libc_undelete //go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2314,7 +2186,6 @@ func Unlink(path string) (err error) { func libc_unlink_trampoline() -//go:linkname libc_unlink libc_unlink //go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2334,7 +2205,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { func libc_unlinkat_trampoline() -//go:linkname libc_unlinkat libc_unlinkat //go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2354,7 +2224,6 @@ func Unmount(path string, flags int) (err error) { func libc_unmount_trampoline() -//go:linkname libc_unmount libc_unmount //go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" // THIS 
FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2376,7 +2245,6 @@ func write(fd int, p []byte) (n int, err error) { func libc_write_trampoline() -//go:linkname libc_write libc_write //go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2392,7 +2260,6 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( func libc_mmap_trampoline() -//go:linkname libc_mmap libc_mmap //go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2407,7 +2274,6 @@ func munmap(addr uintptr, length uintptr) (err error) { func libc_munmap_trampoline() -//go:linkname libc_munmap libc_munmap //go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2444,7 +2310,6 @@ func Fstat(fd int, stat *Stat_t) (err error) { func libc_fstat64_trampoline() -//go:linkname libc_fstat64 libc_fstat64 //go:cgo_import_dynamic libc_fstat64 fstat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2464,7 +2329,6 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { func libc_fstatat64_trampoline() -//go:linkname libc_fstatat64 libc_fstatat64 //go:cgo_import_dynamic libc_fstatat64 fstatat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2479,7 +2343,6 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { func libc_fstatfs64_trampoline() -//go:linkname libc_fstatfs64 libc_fstatfs64 //go:cgo_import_dynamic libc_fstatfs64 fstatfs64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2495,7 +2358,6 @@ func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { func libc_getfsstat64_trampoline() -//go:linkname libc_getfsstat64 libc_getfsstat64 //go:cgo_import_dynamic libc_getfsstat64 getfsstat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2515,12 +2377,11 @@ func Lstat(path string, stat *Stat_t) (err error) { func libc_lstat64_trampoline() -//go:linkname libc_lstat64 libc_lstat64 //go:cgo_import_dynamic libc_lstat64 lstat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { +func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) @@ -2530,7 +2391,6 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { func libc_ptrace_trampoline() -//go:linkname libc_ptrace libc_ptrace //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2550,7 +2410,6 @@ func Stat(path string, stat *Stat_t) (err error) { func libc_stat64_trampoline() -//go:linkname libc_stat64 libc_stat64 //go:cgo_import_dynamic libc_stat64 stat64 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2570,5 +2429,4 @@ func Statfs(path string, stat *Statfs_t) (err error) { func libc_statfs64_trampoline() -//go:linkname libc_statfs64 libc_statfs64 //go:cgo_import_dynamic libc_statfs64 statfs64 "/usr/lib/libSystem.B.dylib" 
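The hunks above apply one mechanical change to every generated darwin wrapper in vendor/golang.org/x/sys/unix: the self-referential "//go:linkname libc_X libc_X" directive is removed, while the "//go:cgo_import_dynamic libc_X x "/usr/lib/libSystem.B.dylib"" binding and the empty libc_X_trampoline() declaration (whose body is a one-instruction jump defined in the accompanying per-arch assembly file) are kept, so each wrapper still dispatches through funcPC(libc_X_trampoline) into libSystem rather than issuing a raw syscall instruction. The only non-mechanical edit is the rename of the generated ptrace wrapper to ptrace1, presumably so that a hand-written ptrace elsewhere in the package can guard the raw call. None of this changes the package's exported API; a minimal caller-side sketch, assuming only the vendored golang.org/x/sys/unix package from this patch:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	// Getpid is one of the generated trampoline-backed wrappers edited above;
	// the directive cleanup does not change its signature or behavior.
	fmt.Println("pid:", unix.Getpid())

	// Gettimeofday follows the same trampoline pattern and still returns a
	// plain error on failure.
	var tv unix.Timeval
	if err := unix.Gettimeofday(&tv); err != nil {
		fmt.Println("gettimeofday failed:", err)
		return
	}
	fmt.Println("unix seconds:", tv.Sec)
}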
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go index 314042a9d42..88826236136 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.1_13.go @@ -24,7 +24,6 @@ func closedir(dir uintptr) (err error) { func libc_closedir_trampoline() -//go:linkname libc_closedir libc_closedir //go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -37,5 +36,4 @@ func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) { func libc_readdir_r_trampoline() -//go:linkname libc_readdir_r libc_readdir_r //go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go index 889c14059e9..7b854cc2900 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go @@ -25,7 +25,6 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { func libc_getgroups_trampoline() -//go:linkname libc_getgroups libc_getgroups //go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -40,7 +39,6 @@ func setgroups(ngid int, gid *_Gid_t) (err error) { func libc_setgroups_trampoline() -//go:linkname libc_setgroups libc_setgroups //go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -56,7 +54,6 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err func libc_wait4_trampoline() -//go:linkname libc_wait4 libc_wait4 //go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -72,7 +69,6 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { func libc_accept_trampoline() -//go:linkname libc_accept libc_accept //go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -87,7 +83,6 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { func libc_bind_trampoline() -//go:linkname libc_bind libc_bind //go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -102,7 +97,6 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { func libc_connect_trampoline() -//go:linkname libc_connect libc_connect //go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -118,7 +112,6 @@ func socket(domain int, typ int, proto int) (fd int, err error) { func libc_socket_trampoline() -//go:linkname libc_socket libc_socket //go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -133,7 +126,6 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen func libc_getsockopt_trampoline() -//go:linkname libc_getsockopt libc_getsockopt //go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -148,7 +140,6 @@ func setsockopt(s int, level 
int, name int, val unsafe.Pointer, vallen uintptr) func libc_setsockopt_trampoline() -//go:linkname libc_setsockopt libc_setsockopt //go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -163,7 +154,6 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func libc_getpeername_trampoline() -//go:linkname libc_getpeername libc_getpeername //go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -178,7 +168,6 @@ func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func libc_getsockname_trampoline() -//go:linkname libc_getsockname libc_getsockname //go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -193,7 +182,6 @@ func Shutdown(s int, how int) (err error) { func libc_shutdown_trampoline() -//go:linkname libc_shutdown libc_shutdown //go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -208,7 +196,6 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { func libc_socketpair_trampoline() -//go:linkname libc_socketpair libc_socketpair //go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -230,7 +217,6 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl func libc_recvfrom_trampoline() -//go:linkname libc_recvfrom libc_recvfrom //go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -251,7 +237,6 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( func libc_sendto_trampoline() -//go:linkname libc_sendto libc_sendto //go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -267,7 +252,6 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { func libc_recvmsg_trampoline() -//go:linkname libc_recvmsg libc_recvmsg //go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -283,7 +267,6 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { func libc_sendmsg_trampoline() -//go:linkname libc_sendmsg libc_sendmsg //go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -299,7 +282,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne func libc_kevent_trampoline() -//go:linkname libc_kevent libc_kevent //go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -319,7 +301,6 @@ func utimes(path string, timeval *[2]Timeval) (err error) { func libc_utimes_trampoline() -//go:linkname libc_utimes libc_utimes //go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -334,7 +315,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { func libc_futimes_trampoline() -//go:linkname libc_futimes libc_futimes //go:cgo_import_dynamic libc_futimes futimes 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -350,7 +330,6 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { func libc_poll_trampoline() -//go:linkname libc_poll libc_poll //go:cgo_import_dynamic libc_poll poll "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -371,7 +350,6 @@ func Madvise(b []byte, behav int) (err error) { func libc_madvise_trampoline() -//go:linkname libc_madvise libc_madvise //go:cgo_import_dynamic libc_madvise madvise "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -392,7 +370,6 @@ func Mlock(b []byte) (err error) { func libc_mlock_trampoline() -//go:linkname libc_mlock libc_mlock //go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -407,7 +384,6 @@ func Mlockall(flags int) (err error) { func libc_mlockall_trampoline() -//go:linkname libc_mlockall libc_mlockall //go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -428,7 +404,6 @@ func Mprotect(b []byte, prot int) (err error) { func libc_mprotect_trampoline() -//go:linkname libc_mprotect libc_mprotect //go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -449,7 +424,6 @@ func Msync(b []byte, flags int) (err error) { func libc_msync_trampoline() -//go:linkname libc_msync libc_msync //go:cgo_import_dynamic libc_msync msync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -470,7 +444,6 @@ func Munlock(b []byte) (err error) { func libc_munlock_trampoline() -//go:linkname libc_munlock libc_munlock //go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -485,7 +458,6 @@ func Munlockall() (err error) { func libc_munlockall_trampoline() -//go:linkname libc_munlockall libc_munlockall //go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -502,7 +474,6 @@ func pipe() (r int, w int, err error) { func libc_pipe_trampoline() -//go:linkname libc_pipe libc_pipe //go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -528,7 +499,6 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o func libc_getxattr_trampoline() -//go:linkname libc_getxattr libc_getxattr //go:cgo_import_dynamic libc_getxattr getxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -549,7 +519,6 @@ func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, optio func libc_fgetxattr_trampoline() -//go:linkname libc_fgetxattr libc_fgetxattr //go:cgo_import_dynamic libc_fgetxattr fgetxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -574,7 +543,6 @@ func setxattr(path string, attr string, data *byte, size int, position uint32, o func libc_setxattr_trampoline() -//go:linkname libc_setxattr libc_setxattr //go:cgo_import_dynamic libc_setxattr setxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -594,7 +562,6 @@ func fsetxattr(fd int, attr string, 
data *byte, size int, position uint32, optio func libc_fsetxattr_trampoline() -//go:linkname libc_fsetxattr libc_fsetxattr //go:cgo_import_dynamic libc_fsetxattr fsetxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -619,7 +586,6 @@ func removexattr(path string, attr string, options int) (err error) { func libc_removexattr_trampoline() -//go:linkname libc_removexattr libc_removexattr //go:cgo_import_dynamic libc_removexattr removexattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -639,7 +605,6 @@ func fremovexattr(fd int, attr string, options int) (err error) { func libc_fremovexattr_trampoline() -//go:linkname libc_fremovexattr libc_fremovexattr //go:cgo_import_dynamic libc_fremovexattr fremovexattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -660,7 +625,6 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro func libc_listxattr_trampoline() -//go:linkname libc_listxattr libc_listxattr //go:cgo_import_dynamic libc_listxattr listxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -676,7 +640,6 @@ func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { func libc_flistxattr_trampoline() -//go:linkname libc_flistxattr libc_flistxattr //go:cgo_import_dynamic libc_flistxattr flistxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -691,7 +654,6 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp func libc_setattrlist_trampoline() -//go:linkname libc_setattrlist libc_setattrlist //go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -707,7 +669,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { func libc_fcntl_trampoline() -//go:linkname libc_fcntl libc_fcntl //go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -722,7 +683,6 @@ func kill(pid int, signum int, posix int) (err error) { func libc_kill_trampoline() -//go:linkname libc_kill libc_kill //go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -737,7 +697,6 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { func libc_ioctl_trampoline() -//go:linkname libc_ioctl libc_ioctl //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -758,7 +717,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) func libc_sysctl_trampoline() -//go:linkname libc_sysctl libc_sysctl //go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -773,7 +731,6 @@ func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer func libc_sendfile_trampoline() -//go:linkname libc_sendfile libc_sendfile //go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -793,7 +750,6 @@ func Access(path string, mode uint32) (err error) { func libc_access_trampoline() -//go:linkname libc_access libc_access //go:cgo_import_dynamic libc_access access 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -808,7 +764,6 @@ func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { func libc_adjtime_trampoline() -//go:linkname libc_adjtime libc_adjtime //go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -828,7 +783,6 @@ func Chdir(path string) (err error) { func libc_chdir_trampoline() -//go:linkname libc_chdir libc_chdir //go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -848,7 +802,6 @@ func Chflags(path string, flags int) (err error) { func libc_chflags_trampoline() -//go:linkname libc_chflags libc_chflags //go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -868,7 +821,6 @@ func Chmod(path string, mode uint32) (err error) { func libc_chmod_trampoline() -//go:linkname libc_chmod libc_chmod //go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -888,7 +840,6 @@ func Chown(path string, uid int, gid int) (err error) { func libc_chown_trampoline() -//go:linkname libc_chown libc_chown //go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -908,7 +859,6 @@ func Chroot(path string) (err error) { func libc_chroot_trampoline() -//go:linkname libc_chroot libc_chroot //go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -923,7 +873,6 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { func libc_clock_gettime_trampoline() -//go:linkname libc_clock_gettime libc_clock_gettime //go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -938,7 +887,6 @@ func Close(fd int) (err error) { func libc_close_trampoline() -//go:linkname libc_close libc_close //go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -963,7 +911,6 @@ func Clonefile(src string, dst string, flags int) (err error) { func libc_clonefile_trampoline() -//go:linkname libc_clonefile libc_clonefile //go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -988,7 +935,6 @@ func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) func libc_clonefileat_trampoline() -//go:linkname libc_clonefileat libc_clonefileat //go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1004,7 +950,6 @@ func Dup(fd int) (nfd int, err error) { func libc_dup_trampoline() -//go:linkname libc_dup libc_dup //go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1019,7 +964,6 @@ func Dup2(from int, to int) (err error) { func libc_dup2_trampoline() -//go:linkname libc_dup2 libc_dup2 //go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1044,7 +988,6 @@ func Exchangedata(path1 string, path2 string, options int) (err 
error) { func libc_exchangedata_trampoline() -//go:linkname libc_exchangedata libc_exchangedata //go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1056,7 +999,6 @@ func Exit(code int) { func libc_exit_trampoline() -//go:linkname libc_exit libc_exit //go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1076,7 +1018,6 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { func libc_faccessat_trampoline() -//go:linkname libc_faccessat libc_faccessat //go:cgo_import_dynamic libc_faccessat faccessat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1091,7 +1032,6 @@ func Fchdir(fd int) (err error) { func libc_fchdir_trampoline() -//go:linkname libc_fchdir libc_fchdir //go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1106,7 +1046,6 @@ func Fchflags(fd int, flags int) (err error) { func libc_fchflags_trampoline() -//go:linkname libc_fchflags libc_fchflags //go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1121,7 +1060,6 @@ func Fchmod(fd int, mode uint32) (err error) { func libc_fchmod_trampoline() -//go:linkname libc_fchmod libc_fchmod //go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1141,7 +1079,6 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { func libc_fchmodat_trampoline() -//go:linkname libc_fchmodat libc_fchmodat //go:cgo_import_dynamic libc_fchmodat fchmodat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1156,7 +1093,6 @@ func Fchown(fd int, uid int, gid int) (err error) { func libc_fchown_trampoline() -//go:linkname libc_fchown libc_fchown //go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1176,7 +1112,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { func libc_fchownat_trampoline() -//go:linkname libc_fchownat libc_fchownat //go:cgo_import_dynamic libc_fchownat fchownat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1196,7 +1131,6 @@ func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) func libc_fclonefileat_trampoline() -//go:linkname libc_fclonefileat libc_fclonefileat //go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1211,7 +1145,6 @@ func Flock(fd int, how int) (err error) { func libc_flock_trampoline() -//go:linkname libc_flock libc_flock //go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1227,7 +1160,6 @@ func Fpathconf(fd int, name int) (val int, err error) { func libc_fpathconf_trampoline() -//go:linkname libc_fpathconf libc_fpathconf //go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1242,7 +1174,6 @@ func Fsync(fd int) (err error) { func libc_fsync_trampoline() -//go:linkname libc_fsync 
libc_fsync //go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1257,7 +1188,6 @@ func Ftruncate(fd int, length int64) (err error) { func libc_ftruncate_trampoline() -//go:linkname libc_ftruncate libc_ftruncate //go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1279,7 +1209,6 @@ func Getcwd(buf []byte) (n int, err error) { func libc_getcwd_trampoline() -//go:linkname libc_getcwd libc_getcwd //go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1292,7 +1221,6 @@ func Getdtablesize() (size int) { func libc_getdtablesize_trampoline() -//go:linkname libc_getdtablesize libc_getdtablesize //go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1305,7 +1233,6 @@ func Getegid() (egid int) { func libc_getegid_trampoline() -//go:linkname libc_getegid libc_getegid //go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1318,7 +1245,6 @@ func Geteuid() (uid int) { func libc_geteuid_trampoline() -//go:linkname libc_geteuid libc_geteuid //go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1331,7 +1257,6 @@ func Getgid() (gid int) { func libc_getgid_trampoline() -//go:linkname libc_getgid libc_getgid //go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1347,7 +1272,6 @@ func Getpgid(pid int) (pgid int, err error) { func libc_getpgid_trampoline() -//go:linkname libc_getpgid libc_getpgid //go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1360,7 +1284,6 @@ func Getpgrp() (pgrp int) { func libc_getpgrp_trampoline() -//go:linkname libc_getpgrp libc_getpgrp //go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1373,7 +1296,6 @@ func Getpid() (pid int) { func libc_getpid_trampoline() -//go:linkname libc_getpid libc_getpid //go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1386,7 +1308,6 @@ func Getppid() (ppid int) { func libc_getppid_trampoline() -//go:linkname libc_getppid libc_getppid //go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1402,7 +1323,6 @@ func Getpriority(which int, who int) (prio int, err error) { func libc_getpriority_trampoline() -//go:linkname libc_getpriority libc_getpriority //go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1417,7 +1337,6 @@ func Getrlimit(which int, lim *Rlimit) (err error) { func libc_getrlimit_trampoline() -//go:linkname libc_getrlimit libc_getrlimit //go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1432,7 +1351,6 @@ func Getrusage(who int, rusage *Rusage) (err error) { func 
libc_getrusage_trampoline() -//go:linkname libc_getrusage libc_getrusage //go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1448,7 +1366,6 @@ func Getsid(pid int) (sid int, err error) { func libc_getsid_trampoline() -//go:linkname libc_getsid libc_getsid //go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1463,7 +1380,6 @@ func Gettimeofday(tp *Timeval) (err error) { func libc_gettimeofday_trampoline() -//go:linkname libc_gettimeofday libc_gettimeofday //go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1476,7 +1392,6 @@ func Getuid() (uid int) { func libc_getuid_trampoline() -//go:linkname libc_getuid libc_getuid //go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1489,7 +1404,6 @@ func Issetugid() (tainted bool) { func libc_issetugid_trampoline() -//go:linkname libc_issetugid libc_issetugid //go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1505,7 +1419,6 @@ func Kqueue() (fd int, err error) { func libc_kqueue_trampoline() -//go:linkname libc_kqueue libc_kqueue //go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1525,7 +1438,6 @@ func Lchown(path string, uid int, gid int) (err error) { func libc_lchown_trampoline() -//go:linkname libc_lchown libc_lchown //go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1550,7 +1462,6 @@ func Link(path string, link string) (err error) { func libc_link_trampoline() -//go:linkname libc_link libc_link //go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1575,7 +1486,6 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er func libc_linkat_trampoline() -//go:linkname libc_linkat libc_linkat //go:cgo_import_dynamic libc_linkat linkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1590,7 +1500,6 @@ func Listen(s int, backlog int) (err error) { func libc_listen_trampoline() -//go:linkname libc_listen libc_listen //go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1610,7 +1519,6 @@ func Mkdir(path string, mode uint32) (err error) { func libc_mkdir_trampoline() -//go:linkname libc_mkdir libc_mkdir //go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1630,7 +1538,6 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { func libc_mkdirat_trampoline() -//go:linkname libc_mkdirat libc_mkdirat //go:cgo_import_dynamic libc_mkdirat mkdirat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1650,7 +1557,6 @@ func Mkfifo(path string, mode uint32) (err error) { func libc_mkfifo_trampoline() -//go:linkname libc_mkfifo libc_mkfifo //go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT 
THE TOP; DO NOT EDIT @@ -1670,7 +1576,6 @@ func Mknod(path string, mode uint32, dev int) (err error) { func libc_mknod_trampoline() -//go:linkname libc_mknod libc_mknod //go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1691,7 +1596,6 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { func libc_open_trampoline() -//go:linkname libc_open libc_open //go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1712,7 +1616,6 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { func libc_openat_trampoline() -//go:linkname libc_openat libc_openat //go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1733,7 +1636,6 @@ func Pathconf(path string, name int) (val int, err error) { func libc_pathconf_trampoline() -//go:linkname libc_pathconf libc_pathconf //go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1755,7 +1657,6 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { func libc_pread_trampoline() -//go:linkname libc_pread libc_pread //go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1777,7 +1678,6 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { func libc_pwrite_trampoline() -//go:linkname libc_pwrite libc_pwrite //go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1799,7 +1699,6 @@ func read(fd int, p []byte) (n int, err error) { func libc_read_trampoline() -//go:linkname libc_read libc_read //go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1826,7 +1725,6 @@ func Readlink(path string, buf []byte) (n int, err error) { func libc_readlink_trampoline() -//go:linkname libc_readlink libc_readlink //go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1853,7 +1751,6 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { func libc_readlinkat_trampoline() -//go:linkname libc_readlinkat libc_readlinkat //go:cgo_import_dynamic libc_readlinkat readlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1878,7 +1775,6 @@ func Rename(from string, to string) (err error) { func libc_rename_trampoline() -//go:linkname libc_rename libc_rename //go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1903,7 +1799,6 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { func libc_renameat_trampoline() -//go:linkname libc_renameat libc_renameat //go:cgo_import_dynamic libc_renameat renameat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1923,7 +1818,6 @@ func Revoke(path string) (err error) { func libc_revoke_trampoline() -//go:linkname libc_revoke libc_revoke //go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1943,7 +1837,6 @@ func 
Rmdir(path string) (err error) {
 
 func libc_rmdir_trampoline()
 
-//go:linkname libc_rmdir libc_rmdir
 //go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -1959,7 +1852,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) {
 
 func libc_lseek_trampoline()
 
-//go:linkname libc_lseek libc_lseek
 //go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -1975,7 +1867,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err
 
 func libc_select_trampoline()
 
-//go:linkname libc_select libc_select
 //go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -1990,7 +1881,6 @@ func Setegid(egid int) (err error) {
 
 func libc_setegid_trampoline()
 
-//go:linkname libc_setegid libc_setegid
 //go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2005,7 +1895,6 @@ func Seteuid(euid int) (err error) {
 
 func libc_seteuid_trampoline()
 
-//go:linkname libc_seteuid libc_seteuid
 //go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2020,7 +1909,6 @@ func Setgid(gid int) (err error) {
 
 func libc_setgid_trampoline()
 
-//go:linkname libc_setgid libc_setgid
 //go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2040,7 +1928,6 @@ func Setlogin(name string) (err error) {
 
 func libc_setlogin_trampoline()
 
-//go:linkname libc_setlogin libc_setlogin
 //go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2055,7 +1942,6 @@ func Setpgid(pid int, pgid int) (err error) {
 
 func libc_setpgid_trampoline()
 
-//go:linkname libc_setpgid libc_setpgid
 //go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2070,7 +1956,6 @@ func Setpriority(which int, who int, prio int) (err error) {
 
 func libc_setpriority_trampoline()
 
-//go:linkname libc_setpriority libc_setpriority
 //go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2085,7 +1970,6 @@ func Setprivexec(flag int) (err error) {
 
 func libc_setprivexec_trampoline()
 
-//go:linkname libc_setprivexec libc_setprivexec
 //go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2100,7 +1984,6 @@ func Setregid(rgid int, egid int) (err error) {
 
 func libc_setregid_trampoline()
 
-//go:linkname libc_setregid libc_setregid
 //go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2115,7 +1998,6 @@ func Setreuid(ruid int, euid int) (err error) {
 
 func libc_setreuid_trampoline()
 
-//go:linkname libc_setreuid libc_setreuid
 //go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2130,7 +2012,6 @@ func Setrlimit(which int, lim *Rlimit) (err error) {
 
 func libc_setrlimit_trampoline()
 
-//go:linkname libc_setrlimit libc_setrlimit
 //go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2146,7 +2027,6 @@ func Setsid() (pid int, err error) {
 
 func libc_setsid_trampoline()
 
-//go:linkname libc_setsid libc_setsid
 //go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2161,7 +2041,6 @@ func Settimeofday(tp *Timeval) (err error) {
 
 func libc_settimeofday_trampoline()
 
-//go:linkname libc_settimeofday libc_settimeofday
 //go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2176,7 +2055,6 @@ func Setuid(uid int) (err error) {
 
 func libc_setuid_trampoline()
 
-//go:linkname libc_setuid libc_setuid
 //go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2201,7 +2079,6 @@ func Symlink(path string, link string) (err error) {
 
 func libc_symlink_trampoline()
 
-//go:linkname libc_symlink libc_symlink
 //go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2226,7 +2103,6 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) {
 
 func libc_symlinkat_trampoline()
 
-//go:linkname libc_symlinkat libc_symlinkat
 //go:cgo_import_dynamic libc_symlinkat symlinkat "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2241,7 +2117,6 @@ func Sync() (err error) {
 
 func libc_sync_trampoline()
 
-//go:linkname libc_sync libc_sync
 //go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2261,7 +2136,6 @@ func Truncate(path string, length int64) (err error) {
 
 func libc_truncate_trampoline()
 
-//go:linkname libc_truncate libc_truncate
 //go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2274,7 +2148,6 @@ func Umask(newmask int) (oldmask int) {
 
 func libc_umask_trampoline()
 
-//go:linkname libc_umask libc_umask
 //go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2294,7 +2167,6 @@ func Undelete(path string) (err error) {
 
 func libc_undelete_trampoline()
 
-//go:linkname libc_undelete libc_undelete
 //go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2314,7 +2186,6 @@ func Unlink(path string) (err error) {
 
 func libc_unlink_trampoline()
 
-//go:linkname libc_unlink libc_unlink
 //go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2334,7 +2205,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) {
 
 func libc_unlinkat_trampoline()
 
-//go:linkname libc_unlinkat libc_unlinkat
 //go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2354,7 +2224,6 @@ func Unmount(path string, flags int) (err error) {
 
 func libc_unmount_trampoline()
 
-//go:linkname libc_unmount libc_unmount
 //go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2376,7 +2245,6 @@ func write(fd int, p []byte) (n int, err error) {
 
 func libc_write_trampoline()
 
-//go:linkname libc_write libc_write
 //go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2392,7 +2260,6 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (
 
 func libc_mmap_trampoline()
 
-//go:linkname libc_mmap libc_mmap
 //go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2407,7 +2274,6 @@ func munmap(addr uintptr, length uintptr) (err error) {
 
 func libc_munmap_trampoline()
 
-//go:linkname libc_munmap libc_munmap
 //go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2444,7 +2310,6 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 func libc_fstat64_trampoline()
 
-//go:linkname libc_fstat64 libc_fstat64
 //go:cgo_import_dynamic libc_fstat64 fstat64 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2464,7 +2329,6 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
 
 func libc_fstatat64_trampoline()
 
-//go:linkname libc_fstatat64 libc_fstatat64
 //go:cgo_import_dynamic libc_fstatat64 fstatat64 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2479,7 +2343,6 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) {
 
 func libc_fstatfs64_trampoline()
 
-//go:linkname libc_fstatfs64 libc_fstatfs64
 //go:cgo_import_dynamic libc_fstatfs64 fstatfs64 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2495,7 +2358,6 @@ func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) {
 
 func libc_getfsstat64_trampoline()
 
-//go:linkname libc_getfsstat64 libc_getfsstat64
 //go:cgo_import_dynamic libc_getfsstat64 getfsstat64 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2515,12 +2377,11 @@ func Lstat(path string, stat *Stat_t) (err error) {
 
 func libc_lstat64_trampoline()
 
-//go:linkname libc_lstat64 libc_lstat64
 //go:cgo_import_dynamic libc_lstat64 lstat64 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
-func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
+func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) {
 	_, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0)
 	if e1 != 0 {
 		err = errnoErr(e1)
@@ -2530,7 +2391,6 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) {
 
 func libc_ptrace_trampoline()
 
-//go:linkname libc_ptrace libc_ptrace
 //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2550,7 +2410,6 @@ func Stat(path string, stat *Stat_t) (err error) {
 
 func libc_stat64_trampoline()
 
-//go:linkname libc_stat64 libc_stat64
 //go:cgo_import_dynamic libc_stat64 stat64 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2570,5 +2429,4 @@ func Statfs(path string, stat *Statfs_t) (err error) {
 
 func libc_statfs64_trampoline()
 
-//go:linkname libc_statfs64 libc_statfs64
 //go:cgo_import_dynamic libc_statfs64 statfs64 "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go
index f519ce9afb3..de4738fff80 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.1_13.go
@@ -24,7 +24,6 @@ func closedir(dir uintptr) (err error) {
 
 func libc_closedir_trampoline()
 
-//go:linkname libc_closedir libc_closedir
 //go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -37,5 +36,4 @@ func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
 
 func libc_readdir_r_trampoline()
 
-//go:linkname libc_readdir_r libc_readdir_r
 //go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
index d6b5249c2f2..8e79ad377be 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go
@@ -25,7 +25,6 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) {
 
 func libc_getgroups_trampoline()
 
-//go:linkname libc_getgroups libc_getgroups
 //go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -40,7 +39,6 @@ func setgroups(ngid int, gid *_Gid_t) (err error) {
 
 func libc_setgroups_trampoline()
 
-//go:linkname libc_setgroups libc_setgroups
 //go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -56,7 +54,6 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err
 
 func libc_wait4_trampoline()
 
-//go:linkname libc_wait4 libc_wait4
 //go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -72,7 +69,6 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) {
 
 func libc_accept_trampoline()
 
-//go:linkname libc_accept libc_accept
 //go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -87,7 +83,6 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
 
 func libc_bind_trampoline()
 
-//go:linkname libc_bind libc_bind
 //go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -102,7 +97,6 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) {
 
 func libc_connect_trampoline()
 
-//go:linkname libc_connect libc_connect
 //go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -118,7 +112,6 @@ func socket(domain int, typ int, proto int) (fd int, err error) {
 
 func libc_socket_trampoline()
 
-//go:linkname libc_socket libc_socket
 //go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -133,7 +126,6 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen
 
 func libc_getsockopt_trampoline()
 
-//go:linkname libc_getsockopt libc_getsockopt
 //go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -148,7 +140,6 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr)
 
 func libc_setsockopt_trampoline()
-//go:linkname libc_setsockopt libc_setsockopt //go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -163,7 +154,6 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func libc_getpeername_trampoline() -//go:linkname libc_getpeername libc_getpeername //go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -178,7 +168,6 @@ func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func libc_getsockname_trampoline() -//go:linkname libc_getsockname libc_getsockname //go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -193,7 +182,6 @@ func Shutdown(s int, how int) (err error) { func libc_shutdown_trampoline() -//go:linkname libc_shutdown libc_shutdown //go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -208,7 +196,6 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { func libc_socketpair_trampoline() -//go:linkname libc_socketpair libc_socketpair //go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -230,7 +217,6 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl func libc_recvfrom_trampoline() -//go:linkname libc_recvfrom libc_recvfrom //go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -251,7 +237,6 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( func libc_sendto_trampoline() -//go:linkname libc_sendto libc_sendto //go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -267,7 +252,6 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { func libc_recvmsg_trampoline() -//go:linkname libc_recvmsg libc_recvmsg //go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -283,7 +267,6 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { func libc_sendmsg_trampoline() -//go:linkname libc_sendmsg libc_sendmsg //go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -299,7 +282,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne func libc_kevent_trampoline() -//go:linkname libc_kevent libc_kevent //go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -319,7 +301,6 @@ func utimes(path string, timeval *[2]Timeval) (err error) { func libc_utimes_trampoline() -//go:linkname libc_utimes libc_utimes //go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -334,7 +315,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { func libc_futimes_trampoline() -//go:linkname libc_futimes libc_futimes //go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ 
-350,7 +330,6 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { func libc_poll_trampoline() -//go:linkname libc_poll libc_poll //go:cgo_import_dynamic libc_poll poll "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -371,7 +350,6 @@ func Madvise(b []byte, behav int) (err error) { func libc_madvise_trampoline() -//go:linkname libc_madvise libc_madvise //go:cgo_import_dynamic libc_madvise madvise "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -392,7 +370,6 @@ func Mlock(b []byte) (err error) { func libc_mlock_trampoline() -//go:linkname libc_mlock libc_mlock //go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -407,7 +384,6 @@ func Mlockall(flags int) (err error) { func libc_mlockall_trampoline() -//go:linkname libc_mlockall libc_mlockall //go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -428,7 +404,6 @@ func Mprotect(b []byte, prot int) (err error) { func libc_mprotect_trampoline() -//go:linkname libc_mprotect libc_mprotect //go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -449,7 +424,6 @@ func Msync(b []byte, flags int) (err error) { func libc_msync_trampoline() -//go:linkname libc_msync libc_msync //go:cgo_import_dynamic libc_msync msync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -470,7 +444,6 @@ func Munlock(b []byte) (err error) { func libc_munlock_trampoline() -//go:linkname libc_munlock libc_munlock //go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -485,7 +458,6 @@ func Munlockall() (err error) { func libc_munlockall_trampoline() -//go:linkname libc_munlockall libc_munlockall //go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -502,7 +474,6 @@ func pipe() (r int, w int, err error) { func libc_pipe_trampoline() -//go:linkname libc_pipe libc_pipe //go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -528,7 +499,6 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o func libc_getxattr_trampoline() -//go:linkname libc_getxattr libc_getxattr //go:cgo_import_dynamic libc_getxattr getxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -549,7 +519,6 @@ func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, optio func libc_fgetxattr_trampoline() -//go:linkname libc_fgetxattr libc_fgetxattr //go:cgo_import_dynamic libc_fgetxattr fgetxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -574,7 +543,6 @@ func setxattr(path string, attr string, data *byte, size int, position uint32, o func libc_setxattr_trampoline() -//go:linkname libc_setxattr libc_setxattr //go:cgo_import_dynamic libc_setxattr setxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -594,7 +562,6 @@ func fsetxattr(fd int, attr string, data *byte, size int, position uint32, optio func libc_fsetxattr_trampoline() -//go:linkname 
libc_fsetxattr libc_fsetxattr //go:cgo_import_dynamic libc_fsetxattr fsetxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -619,7 +586,6 @@ func removexattr(path string, attr string, options int) (err error) { func libc_removexattr_trampoline() -//go:linkname libc_removexattr libc_removexattr //go:cgo_import_dynamic libc_removexattr removexattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -639,7 +605,6 @@ func fremovexattr(fd int, attr string, options int) (err error) { func libc_fremovexattr_trampoline() -//go:linkname libc_fremovexattr libc_fremovexattr //go:cgo_import_dynamic libc_fremovexattr fremovexattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -660,7 +625,6 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro func libc_listxattr_trampoline() -//go:linkname libc_listxattr libc_listxattr //go:cgo_import_dynamic libc_listxattr listxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -676,7 +640,6 @@ func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { func libc_flistxattr_trampoline() -//go:linkname libc_flistxattr libc_flistxattr //go:cgo_import_dynamic libc_flistxattr flistxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -691,7 +654,6 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp func libc_setattrlist_trampoline() -//go:linkname libc_setattrlist libc_setattrlist //go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -707,7 +669,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { func libc_fcntl_trampoline() -//go:linkname libc_fcntl libc_fcntl //go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -722,7 +683,6 @@ func kill(pid int, signum int, posix int) (err error) { func libc_kill_trampoline() -//go:linkname libc_kill libc_kill //go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -737,7 +697,6 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { func libc_ioctl_trampoline() -//go:linkname libc_ioctl libc_ioctl //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -758,7 +717,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) func libc_sysctl_trampoline() -//go:linkname libc_sysctl libc_sysctl //go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -773,7 +731,6 @@ func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer func libc_sendfile_trampoline() -//go:linkname libc_sendfile libc_sendfile //go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -793,7 +750,6 @@ func Access(path string, mode uint32) (err error) { func libc_access_trampoline() -//go:linkname libc_access libc_access //go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -808,7 +764,6 @@ func 
Adjtime(delta *Timeval, olddelta *Timeval) (err error) { func libc_adjtime_trampoline() -//go:linkname libc_adjtime libc_adjtime //go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -828,7 +783,6 @@ func Chdir(path string) (err error) { func libc_chdir_trampoline() -//go:linkname libc_chdir libc_chdir //go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -848,7 +802,6 @@ func Chflags(path string, flags int) (err error) { func libc_chflags_trampoline() -//go:linkname libc_chflags libc_chflags //go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -868,7 +821,6 @@ func Chmod(path string, mode uint32) (err error) { func libc_chmod_trampoline() -//go:linkname libc_chmod libc_chmod //go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -888,7 +840,6 @@ func Chown(path string, uid int, gid int) (err error) { func libc_chown_trampoline() -//go:linkname libc_chown libc_chown //go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -908,7 +859,6 @@ func Chroot(path string) (err error) { func libc_chroot_trampoline() -//go:linkname libc_chroot libc_chroot //go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -923,7 +873,6 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { func libc_clock_gettime_trampoline() -//go:linkname libc_clock_gettime libc_clock_gettime //go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -938,7 +887,6 @@ func Close(fd int) (err error) { func libc_close_trampoline() -//go:linkname libc_close libc_close //go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -963,7 +911,6 @@ func Clonefile(src string, dst string, flags int) (err error) { func libc_clonefile_trampoline() -//go:linkname libc_clonefile libc_clonefile //go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -988,7 +935,6 @@ func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) func libc_clonefileat_trampoline() -//go:linkname libc_clonefileat libc_clonefileat //go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1004,7 +950,6 @@ func Dup(fd int) (nfd int, err error) { func libc_dup_trampoline() -//go:linkname libc_dup libc_dup //go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1019,7 +964,6 @@ func Dup2(from int, to int) (err error) { func libc_dup2_trampoline() -//go:linkname libc_dup2 libc_dup2 //go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1044,7 +988,6 @@ func Exchangedata(path1 string, path2 string, options int) (err error) { func libc_exchangedata_trampoline() -//go:linkname libc_exchangedata libc_exchangedata //go:cgo_import_dynamic 
libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1056,7 +999,6 @@ func Exit(code int) { func libc_exit_trampoline() -//go:linkname libc_exit libc_exit //go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1076,7 +1018,6 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { func libc_faccessat_trampoline() -//go:linkname libc_faccessat libc_faccessat //go:cgo_import_dynamic libc_faccessat faccessat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1091,7 +1032,6 @@ func Fchdir(fd int) (err error) { func libc_fchdir_trampoline() -//go:linkname libc_fchdir libc_fchdir //go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1106,7 +1046,6 @@ func Fchflags(fd int, flags int) (err error) { func libc_fchflags_trampoline() -//go:linkname libc_fchflags libc_fchflags //go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1121,7 +1060,6 @@ func Fchmod(fd int, mode uint32) (err error) { func libc_fchmod_trampoline() -//go:linkname libc_fchmod libc_fchmod //go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1141,7 +1079,6 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { func libc_fchmodat_trampoline() -//go:linkname libc_fchmodat libc_fchmodat //go:cgo_import_dynamic libc_fchmodat fchmodat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1156,7 +1093,6 @@ func Fchown(fd int, uid int, gid int) (err error) { func libc_fchown_trampoline() -//go:linkname libc_fchown libc_fchown //go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1176,7 +1112,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { func libc_fchownat_trampoline() -//go:linkname libc_fchownat libc_fchownat //go:cgo_import_dynamic libc_fchownat fchownat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1196,7 +1131,6 @@ func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) func libc_fclonefileat_trampoline() -//go:linkname libc_fclonefileat libc_fclonefileat //go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1211,7 +1145,6 @@ func Flock(fd int, how int) (err error) { func libc_flock_trampoline() -//go:linkname libc_flock libc_flock //go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1227,7 +1160,6 @@ func Fpathconf(fd int, name int) (val int, err error) { func libc_fpathconf_trampoline() -//go:linkname libc_fpathconf libc_fpathconf //go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1242,7 +1174,6 @@ func Fsync(fd int) (err error) { func libc_fsync_trampoline() -//go:linkname libc_fsync libc_fsync //go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT @@ -1257,7 +1188,6 @@ func Ftruncate(fd int, length int64) (err error) { func libc_ftruncate_trampoline() -//go:linkname libc_ftruncate libc_ftruncate //go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1279,7 +1209,6 @@ func Getcwd(buf []byte) (n int, err error) { func libc_getcwd_trampoline() -//go:linkname libc_getcwd libc_getcwd //go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1292,7 +1221,6 @@ func Getdtablesize() (size int) { func libc_getdtablesize_trampoline() -//go:linkname libc_getdtablesize libc_getdtablesize //go:cgo_import_dynamic libc_getdtablesize getdtablesize "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1305,7 +1233,6 @@ func Getegid() (egid int) { func libc_getegid_trampoline() -//go:linkname libc_getegid libc_getegid //go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1318,7 +1245,6 @@ func Geteuid() (uid int) { func libc_geteuid_trampoline() -//go:linkname libc_geteuid libc_geteuid //go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1331,7 +1257,6 @@ func Getgid() (gid int) { func libc_getgid_trampoline() -//go:linkname libc_getgid libc_getgid //go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1347,7 +1272,6 @@ func Getpgid(pid int) (pgid int, err error) { func libc_getpgid_trampoline() -//go:linkname libc_getpgid libc_getpgid //go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1360,7 +1284,6 @@ func Getpgrp() (pgrp int) { func libc_getpgrp_trampoline() -//go:linkname libc_getpgrp libc_getpgrp //go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1373,7 +1296,6 @@ func Getpid() (pid int) { func libc_getpid_trampoline() -//go:linkname libc_getpid libc_getpid //go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1386,7 +1308,6 @@ func Getppid() (ppid int) { func libc_getppid_trampoline() -//go:linkname libc_getppid libc_getppid //go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1402,7 +1323,6 @@ func Getpriority(which int, who int) (prio int, err error) { func libc_getpriority_trampoline() -//go:linkname libc_getpriority libc_getpriority //go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1417,7 +1337,6 @@ func Getrlimit(which int, lim *Rlimit) (err error) { func libc_getrlimit_trampoline() -//go:linkname libc_getrlimit libc_getrlimit //go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1432,7 +1351,6 @@ func Getrusage(who int, rusage *Rusage) (err error) { func libc_getrusage_trampoline() -//go:linkname libc_getrusage libc_getrusage //go:cgo_import_dynamic libc_getrusage getrusage 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1448,7 +1366,6 @@ func Getsid(pid int) (sid int, err error) { func libc_getsid_trampoline() -//go:linkname libc_getsid libc_getsid //go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1463,7 +1380,6 @@ func Gettimeofday(tp *Timeval) (err error) { func libc_gettimeofday_trampoline() -//go:linkname libc_gettimeofday libc_gettimeofday //go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1476,7 +1392,6 @@ func Getuid() (uid int) { func libc_getuid_trampoline() -//go:linkname libc_getuid libc_getuid //go:cgo_import_dynamic libc_getuid getuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1489,7 +1404,6 @@ func Issetugid() (tainted bool) { func libc_issetugid_trampoline() -//go:linkname libc_issetugid libc_issetugid //go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1505,7 +1419,6 @@ func Kqueue() (fd int, err error) { func libc_kqueue_trampoline() -//go:linkname libc_kqueue libc_kqueue //go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1525,7 +1438,6 @@ func Lchown(path string, uid int, gid int) (err error) { func libc_lchown_trampoline() -//go:linkname libc_lchown libc_lchown //go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1550,7 +1462,6 @@ func Link(path string, link string) (err error) { func libc_link_trampoline() -//go:linkname libc_link libc_link //go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1575,7 +1486,6 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er func libc_linkat_trampoline() -//go:linkname libc_linkat libc_linkat //go:cgo_import_dynamic libc_linkat linkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1590,7 +1500,6 @@ func Listen(s int, backlog int) (err error) { func libc_listen_trampoline() -//go:linkname libc_listen libc_listen //go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1610,7 +1519,6 @@ func Mkdir(path string, mode uint32) (err error) { func libc_mkdir_trampoline() -//go:linkname libc_mkdir libc_mkdir //go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1630,7 +1538,6 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { func libc_mkdirat_trampoline() -//go:linkname libc_mkdirat libc_mkdirat //go:cgo_import_dynamic libc_mkdirat mkdirat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1650,7 +1557,6 @@ func Mkfifo(path string, mode uint32) (err error) { func libc_mkfifo_trampoline() -//go:linkname libc_mkfifo libc_mkfifo //go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1670,7 +1576,6 @@ func Mknod(path string, mode uint32, dev int) (err error) { func 
libc_mknod_trampoline() -//go:linkname libc_mknod libc_mknod //go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1691,7 +1596,6 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { func libc_open_trampoline() -//go:linkname libc_open libc_open //go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1712,7 +1616,6 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { func libc_openat_trampoline() -//go:linkname libc_openat libc_openat //go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1733,7 +1636,6 @@ func Pathconf(path string, name int) (val int, err error) { func libc_pathconf_trampoline() -//go:linkname libc_pathconf libc_pathconf //go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1755,7 +1657,6 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { func libc_pread_trampoline() -//go:linkname libc_pread libc_pread //go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1777,7 +1678,6 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { func libc_pwrite_trampoline() -//go:linkname libc_pwrite libc_pwrite //go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1799,7 +1699,6 @@ func read(fd int, p []byte) (n int, err error) { func libc_read_trampoline() -//go:linkname libc_read libc_read //go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1826,7 +1725,6 @@ func Readlink(path string, buf []byte) (n int, err error) { func libc_readlink_trampoline() -//go:linkname libc_readlink libc_readlink //go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1853,7 +1751,6 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { func libc_readlinkat_trampoline() -//go:linkname libc_readlinkat libc_readlinkat //go:cgo_import_dynamic libc_readlinkat readlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1878,7 +1775,6 @@ func Rename(from string, to string) (err error) { func libc_rename_trampoline() -//go:linkname libc_rename libc_rename //go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1903,7 +1799,6 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { func libc_renameat_trampoline() -//go:linkname libc_renameat libc_renameat //go:cgo_import_dynamic libc_renameat renameat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1923,7 +1818,6 @@ func Revoke(path string) (err error) { func libc_revoke_trampoline() -//go:linkname libc_revoke libc_revoke //go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1943,7 +1837,6 @@ func Rmdir(path string) (err error) { func libc_rmdir_trampoline() -//go:linkname libc_rmdir libc_rmdir 
//go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1959,7 +1852,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { func libc_lseek_trampoline() -//go:linkname libc_lseek libc_lseek //go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1975,7 +1867,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err func libc_select_trampoline() -//go:linkname libc_select libc_select //go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1990,7 +1881,6 @@ func Setegid(egid int) (err error) { func libc_setegid_trampoline() -//go:linkname libc_setegid libc_setegid //go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2005,7 +1895,6 @@ func Seteuid(euid int) (err error) { func libc_seteuid_trampoline() -//go:linkname libc_seteuid libc_seteuid //go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2020,7 +1909,6 @@ func Setgid(gid int) (err error) { func libc_setgid_trampoline() -//go:linkname libc_setgid libc_setgid //go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2040,7 +1928,6 @@ func Setlogin(name string) (err error) { func libc_setlogin_trampoline() -//go:linkname libc_setlogin libc_setlogin //go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2055,7 +1942,6 @@ func Setpgid(pid int, pgid int) (err error) { func libc_setpgid_trampoline() -//go:linkname libc_setpgid libc_setpgid //go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2070,7 +1956,6 @@ func Setpriority(which int, who int, prio int) (err error) { func libc_setpriority_trampoline() -//go:linkname libc_setpriority libc_setpriority //go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2085,7 +1970,6 @@ func Setprivexec(flag int) (err error) { func libc_setprivexec_trampoline() -//go:linkname libc_setprivexec libc_setprivexec //go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2100,7 +1984,6 @@ func Setregid(rgid int, egid int) (err error) { func libc_setregid_trampoline() -//go:linkname libc_setregid libc_setregid //go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2115,7 +1998,6 @@ func Setreuid(ruid int, euid int) (err error) { func libc_setreuid_trampoline() -//go:linkname libc_setreuid libc_setreuid //go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2130,7 +2012,6 @@ func Setrlimit(which int, lim *Rlimit) (err error) { func libc_setrlimit_trampoline() -//go:linkname libc_setrlimit libc_setrlimit //go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT @@ -2146,7 +2027,6 @@ func Setsid() (pid int, err error) { func libc_setsid_trampoline() -//go:linkname libc_setsid libc_setsid //go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2161,7 +2041,6 @@ func Settimeofday(tp *Timeval) (err error) { func libc_settimeofday_trampoline() -//go:linkname libc_settimeofday libc_settimeofday //go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2176,7 +2055,6 @@ func Setuid(uid int) (err error) { func libc_setuid_trampoline() -//go:linkname libc_setuid libc_setuid //go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2201,7 +2079,6 @@ func Symlink(path string, link string) (err error) { func libc_symlink_trampoline() -//go:linkname libc_symlink libc_symlink //go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2226,7 +2103,6 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { func libc_symlinkat_trampoline() -//go:linkname libc_symlinkat libc_symlinkat //go:cgo_import_dynamic libc_symlinkat symlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2241,7 +2117,6 @@ func Sync() (err error) { func libc_sync_trampoline() -//go:linkname libc_sync libc_sync //go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2261,7 +2136,6 @@ func Truncate(path string, length int64) (err error) { func libc_truncate_trampoline() -//go:linkname libc_truncate libc_truncate //go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2274,7 +2148,6 @@ func Umask(newmask int) (oldmask int) { func libc_umask_trampoline() -//go:linkname libc_umask libc_umask //go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2294,7 +2167,6 @@ func Undelete(path string) (err error) { func libc_undelete_trampoline() -//go:linkname libc_undelete libc_undelete //go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2314,7 +2186,6 @@ func Unlink(path string) (err error) { func libc_unlink_trampoline() -//go:linkname libc_unlink libc_unlink //go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2334,7 +2205,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { func libc_unlinkat_trampoline() -//go:linkname libc_unlinkat libc_unlinkat //go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2354,7 +2224,6 @@ func Unmount(path string, flags int) (err error) { func libc_unmount_trampoline() -//go:linkname libc_unmount libc_unmount //go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2376,7 +2245,6 @@ func write(fd int, p []byte) (n int, err error) { func libc_write_trampoline() -//go:linkname libc_write libc_write //go:cgo_import_dynamic 
libc_write write "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2392,7 +2260,6 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (
 
 func libc_mmap_trampoline()
 
-//go:linkname libc_mmap libc_mmap
 //go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2407,7 +2274,6 @@ func munmap(addr uintptr, length uintptr) (err error) {
 
 func libc_munmap_trampoline()
 
-//go:linkname libc_munmap libc_munmap
 //go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2444,7 +2310,6 @@ func Fstat(fd int, stat *Stat_t) (err error) {
 
 func libc_fstat_trampoline()
 
-//go:linkname libc_fstat libc_fstat
 //go:cgo_import_dynamic libc_fstat fstat "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2464,7 +2329,6 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) {
 
 func libc_fstatat_trampoline()
 
-//go:linkname libc_fstatat libc_fstatat
 //go:cgo_import_dynamic libc_fstatat fstatat "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2479,7 +2343,6 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) {
 
 func libc_fstatfs_trampoline()
 
-//go:linkname libc_fstatfs libc_fstatfs
 //go:cgo_import_dynamic libc_fstatfs fstatfs "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2495,7 +2358,6 @@ func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) {
 
 func libc_getfsstat_trampoline()
 
-//go:linkname libc_getfsstat libc_getfsstat
 //go:cgo_import_dynamic libc_getfsstat getfsstat "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2515,7 +2377,6 @@ func Lstat(path string, stat *Stat_t) (err error) {
 
 func libc_lstat_trampoline()
 
-//go:linkname libc_lstat libc_lstat
 //go:cgo_import_dynamic libc_lstat lstat "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2535,7 +2396,6 @@ func Stat(path string, stat *Stat_t) (err error) {
 
 func libc_stat_trampoline()
 
-//go:linkname libc_stat libc_stat
 //go:cgo_import_dynamic libc_stat stat "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -2555,5 +2415,4 @@ func Statfs(path string, stat *Statfs_t) (err error) {
 
 func libc_statfs_trampoline()
 
-//go:linkname libc_statfs libc_statfs
 //go:cgo_import_dynamic libc_statfs statfs "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go
index d64e6c806f5..870eb37abf5 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.1_13.go
@@ -24,7 +24,6 @@ func closedir(dir uintptr) (err error) {
 
 func libc_closedir_trampoline()
 
-//go:linkname libc_closedir libc_closedir
 //go:cgo_import_dynamic libc_closedir closedir "/usr/lib/libSystem.B.dylib"
 
 // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
 
@@ -37,5 +36,4 @@ func readdir_r(dir uintptr, entry *Dirent, result **Dirent) (res Errno) {
 
 func libc_readdir_r_trampoline()
 
-//go:linkname libc_readdir_r libc_readdir_r
 //go:cgo_import_dynamic libc_readdir_r readdir_r "/usr/lib/libSystem.B.dylib"
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go index 23b65a5301a..99509b12dac 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go @@ -25,7 +25,6 @@ func getgroups(ngid int, gid *_Gid_t) (n int, err error) { func libc_getgroups_trampoline() -//go:linkname libc_getgroups libc_getgroups //go:cgo_import_dynamic libc_getgroups getgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -40,7 +39,6 @@ func setgroups(ngid int, gid *_Gid_t) (err error) { func libc_setgroups_trampoline() -//go:linkname libc_setgroups libc_setgroups //go:cgo_import_dynamic libc_setgroups setgroups "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -56,7 +54,6 @@ func wait4(pid int, wstatus *_C_int, options int, rusage *Rusage) (wpid int, err func libc_wait4_trampoline() -//go:linkname libc_wait4 libc_wait4 //go:cgo_import_dynamic libc_wait4 wait4 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -72,7 +69,6 @@ func accept(s int, rsa *RawSockaddrAny, addrlen *_Socklen) (fd int, err error) { func libc_accept_trampoline() -//go:linkname libc_accept libc_accept //go:cgo_import_dynamic libc_accept accept "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -87,7 +83,6 @@ func bind(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { func libc_bind_trampoline() -//go:linkname libc_bind libc_bind //go:cgo_import_dynamic libc_bind bind "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -102,7 +97,6 @@ func connect(s int, addr unsafe.Pointer, addrlen _Socklen) (err error) { func libc_connect_trampoline() -//go:linkname libc_connect libc_connect //go:cgo_import_dynamic libc_connect connect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -118,7 +112,6 @@ func socket(domain int, typ int, proto int) (fd int, err error) { func libc_socket_trampoline() -//go:linkname libc_socket libc_socket //go:cgo_import_dynamic libc_socket socket "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -133,7 +126,6 @@ func getsockopt(s int, level int, name int, val unsafe.Pointer, vallen *_Socklen func libc_getsockopt_trampoline() -//go:linkname libc_getsockopt libc_getsockopt //go:cgo_import_dynamic libc_getsockopt getsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -148,7 +140,6 @@ func setsockopt(s int, level int, name int, val unsafe.Pointer, vallen uintptr) func libc_setsockopt_trampoline() -//go:linkname libc_setsockopt libc_setsockopt //go:cgo_import_dynamic libc_setsockopt setsockopt "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -163,7 +154,6 @@ func getpeername(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func libc_getpeername_trampoline() -//go:linkname libc_getpeername libc_getpeername //go:cgo_import_dynamic libc_getpeername getpeername "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -178,7 +168,6 @@ func getsockname(fd int, rsa *RawSockaddrAny, addrlen *_Socklen) (err error) { func libc_getsockname_trampoline() -//go:linkname libc_getsockname libc_getsockname //go:cgo_import_dynamic libc_getsockname getsockname "/usr/lib/libSystem.B.dylib" // THIS FILE IS 
GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -193,7 +182,6 @@ func Shutdown(s int, how int) (err error) { func libc_shutdown_trampoline() -//go:linkname libc_shutdown libc_shutdown //go:cgo_import_dynamic libc_shutdown shutdown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -208,7 +196,6 @@ func socketpair(domain int, typ int, proto int, fd *[2]int32) (err error) { func libc_socketpair_trampoline() -//go:linkname libc_socketpair libc_socketpair //go:cgo_import_dynamic libc_socketpair socketpair "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -230,7 +217,6 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl func libc_recvfrom_trampoline() -//go:linkname libc_recvfrom libc_recvfrom //go:cgo_import_dynamic libc_recvfrom recvfrom "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -251,7 +237,6 @@ func sendto(s int, buf []byte, flags int, to unsafe.Pointer, addrlen _Socklen) ( func libc_sendto_trampoline() -//go:linkname libc_sendto libc_sendto //go:cgo_import_dynamic libc_sendto sendto "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -267,7 +252,6 @@ func recvmsg(s int, msg *Msghdr, flags int) (n int, err error) { func libc_recvmsg_trampoline() -//go:linkname libc_recvmsg libc_recvmsg //go:cgo_import_dynamic libc_recvmsg recvmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -283,7 +267,6 @@ func sendmsg(s int, msg *Msghdr, flags int) (n int, err error) { func libc_sendmsg_trampoline() -//go:linkname libc_sendmsg libc_sendmsg //go:cgo_import_dynamic libc_sendmsg sendmsg "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -299,7 +282,6 @@ func kevent(kq int, change unsafe.Pointer, nchange int, event unsafe.Pointer, ne func libc_kevent_trampoline() -//go:linkname libc_kevent libc_kevent //go:cgo_import_dynamic libc_kevent kevent "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -319,7 +301,6 @@ func utimes(path string, timeval *[2]Timeval) (err error) { func libc_utimes_trampoline() -//go:linkname libc_utimes libc_utimes //go:cgo_import_dynamic libc_utimes utimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -334,7 +315,6 @@ func futimes(fd int, timeval *[2]Timeval) (err error) { func libc_futimes_trampoline() -//go:linkname libc_futimes libc_futimes //go:cgo_import_dynamic libc_futimes futimes "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -350,7 +330,6 @@ func poll(fds *PollFd, nfds int, timeout int) (n int, err error) { func libc_poll_trampoline() -//go:linkname libc_poll libc_poll //go:cgo_import_dynamic libc_poll poll "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -371,7 +350,6 @@ func Madvise(b []byte, behav int) (err error) { func libc_madvise_trampoline() -//go:linkname libc_madvise libc_madvise //go:cgo_import_dynamic libc_madvise madvise "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -392,7 +370,6 @@ func Mlock(b []byte) (err error) { func libc_mlock_trampoline() -//go:linkname libc_mlock libc_mlock //go:cgo_import_dynamic libc_mlock mlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; 
DO NOT EDIT @@ -407,7 +384,6 @@ func Mlockall(flags int) (err error) { func libc_mlockall_trampoline() -//go:linkname libc_mlockall libc_mlockall //go:cgo_import_dynamic libc_mlockall mlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -428,7 +404,6 @@ func Mprotect(b []byte, prot int) (err error) { func libc_mprotect_trampoline() -//go:linkname libc_mprotect libc_mprotect //go:cgo_import_dynamic libc_mprotect mprotect "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -449,7 +424,6 @@ func Msync(b []byte, flags int) (err error) { func libc_msync_trampoline() -//go:linkname libc_msync libc_msync //go:cgo_import_dynamic libc_msync msync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -470,7 +444,6 @@ func Munlock(b []byte) (err error) { func libc_munlock_trampoline() -//go:linkname libc_munlock libc_munlock //go:cgo_import_dynamic libc_munlock munlock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -485,7 +458,6 @@ func Munlockall() (err error) { func libc_munlockall_trampoline() -//go:linkname libc_munlockall libc_munlockall //go:cgo_import_dynamic libc_munlockall munlockall "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -502,7 +474,6 @@ func pipe() (r int, w int, err error) { func libc_pipe_trampoline() -//go:linkname libc_pipe libc_pipe //go:cgo_import_dynamic libc_pipe pipe "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -528,7 +499,6 @@ func getxattr(path string, attr string, dest *byte, size int, position uint32, o func libc_getxattr_trampoline() -//go:linkname libc_getxattr libc_getxattr //go:cgo_import_dynamic libc_getxattr getxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -549,7 +519,6 @@ func fgetxattr(fd int, attr string, dest *byte, size int, position uint32, optio func libc_fgetxattr_trampoline() -//go:linkname libc_fgetxattr libc_fgetxattr //go:cgo_import_dynamic libc_fgetxattr fgetxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -574,7 +543,6 @@ func setxattr(path string, attr string, data *byte, size int, position uint32, o func libc_setxattr_trampoline() -//go:linkname libc_setxattr libc_setxattr //go:cgo_import_dynamic libc_setxattr setxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -594,7 +562,6 @@ func fsetxattr(fd int, attr string, data *byte, size int, position uint32, optio func libc_fsetxattr_trampoline() -//go:linkname libc_fsetxattr libc_fsetxattr //go:cgo_import_dynamic libc_fsetxattr fsetxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -619,7 +586,6 @@ func removexattr(path string, attr string, options int) (err error) { func libc_removexattr_trampoline() -//go:linkname libc_removexattr libc_removexattr //go:cgo_import_dynamic libc_removexattr removexattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -639,7 +605,6 @@ func fremovexattr(fd int, attr string, options int) (err error) { func libc_fremovexattr_trampoline() -//go:linkname libc_fremovexattr libc_fremovexattr //go:cgo_import_dynamic libc_fremovexattr fremovexattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO 
NOT EDIT @@ -660,7 +625,6 @@ func listxattr(path string, dest *byte, size int, options int) (sz int, err erro func libc_listxattr_trampoline() -//go:linkname libc_listxattr libc_listxattr //go:cgo_import_dynamic libc_listxattr listxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -676,7 +640,6 @@ func flistxattr(fd int, dest *byte, size int, options int) (sz int, err error) { func libc_flistxattr_trampoline() -//go:linkname libc_flistxattr libc_flistxattr //go:cgo_import_dynamic libc_flistxattr flistxattr "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -691,7 +654,6 @@ func setattrlist(path *byte, list unsafe.Pointer, buf unsafe.Pointer, size uintp func libc_setattrlist_trampoline() -//go:linkname libc_setattrlist libc_setattrlist //go:cgo_import_dynamic libc_setattrlist setattrlist "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -707,7 +669,6 @@ func fcntl(fd int, cmd int, arg int) (val int, err error) { func libc_fcntl_trampoline() -//go:linkname libc_fcntl libc_fcntl //go:cgo_import_dynamic libc_fcntl fcntl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -722,7 +683,6 @@ func kill(pid int, signum int, posix int) (err error) { func libc_kill_trampoline() -//go:linkname libc_kill libc_kill //go:cgo_import_dynamic libc_kill kill "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -737,7 +697,6 @@ func ioctl(fd int, req uint, arg uintptr) (err error) { func libc_ioctl_trampoline() -//go:linkname libc_ioctl libc_ioctl //go:cgo_import_dynamic libc_ioctl ioctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -758,7 +717,6 @@ func sysctl(mib []_C_int, old *byte, oldlen *uintptr, new *byte, newlen uintptr) func libc_sysctl_trampoline() -//go:linkname libc_sysctl libc_sysctl //go:cgo_import_dynamic libc_sysctl sysctl "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -773,7 +731,6 @@ func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer func libc_sendfile_trampoline() -//go:linkname libc_sendfile libc_sendfile //go:cgo_import_dynamic libc_sendfile sendfile "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -793,7 +750,6 @@ func Access(path string, mode uint32) (err error) { func libc_access_trampoline() -//go:linkname libc_access libc_access //go:cgo_import_dynamic libc_access access "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -808,7 +764,6 @@ func Adjtime(delta *Timeval, olddelta *Timeval) (err error) { func libc_adjtime_trampoline() -//go:linkname libc_adjtime libc_adjtime //go:cgo_import_dynamic libc_adjtime adjtime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -828,7 +783,6 @@ func Chdir(path string) (err error) { func libc_chdir_trampoline() -//go:linkname libc_chdir libc_chdir //go:cgo_import_dynamic libc_chdir chdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -848,7 +802,6 @@ func Chflags(path string, flags int) (err error) { func libc_chflags_trampoline() -//go:linkname libc_chflags libc_chflags //go:cgo_import_dynamic libc_chflags chflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT 
EDIT @@ -868,7 +821,6 @@ func Chmod(path string, mode uint32) (err error) { func libc_chmod_trampoline() -//go:linkname libc_chmod libc_chmod //go:cgo_import_dynamic libc_chmod chmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -888,7 +840,6 @@ func Chown(path string, uid int, gid int) (err error) { func libc_chown_trampoline() -//go:linkname libc_chown libc_chown //go:cgo_import_dynamic libc_chown chown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -908,7 +859,6 @@ func Chroot(path string) (err error) { func libc_chroot_trampoline() -//go:linkname libc_chroot libc_chroot //go:cgo_import_dynamic libc_chroot chroot "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -923,7 +873,6 @@ func ClockGettime(clockid int32, time *Timespec) (err error) { func libc_clock_gettime_trampoline() -//go:linkname libc_clock_gettime libc_clock_gettime //go:cgo_import_dynamic libc_clock_gettime clock_gettime "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -938,7 +887,6 @@ func Close(fd int) (err error) { func libc_close_trampoline() -//go:linkname libc_close libc_close //go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -963,7 +911,6 @@ func Clonefile(src string, dst string, flags int) (err error) { func libc_clonefile_trampoline() -//go:linkname libc_clonefile libc_clonefile //go:cgo_import_dynamic libc_clonefile clonefile "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -988,7 +935,6 @@ func Clonefileat(srcDirfd int, src string, dstDirfd int, dst string, flags int) func libc_clonefileat_trampoline() -//go:linkname libc_clonefileat libc_clonefileat //go:cgo_import_dynamic libc_clonefileat clonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1004,7 +950,6 @@ func Dup(fd int) (nfd int, err error) { func libc_dup_trampoline() -//go:linkname libc_dup libc_dup //go:cgo_import_dynamic libc_dup dup "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1019,7 +964,6 @@ func Dup2(from int, to int) (err error) { func libc_dup2_trampoline() -//go:linkname libc_dup2 libc_dup2 //go:cgo_import_dynamic libc_dup2 dup2 "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1044,7 +988,6 @@ func Exchangedata(path1 string, path2 string, options int) (err error) { func libc_exchangedata_trampoline() -//go:linkname libc_exchangedata libc_exchangedata //go:cgo_import_dynamic libc_exchangedata exchangedata "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1056,7 +999,6 @@ func Exit(code int) { func libc_exit_trampoline() -//go:linkname libc_exit libc_exit //go:cgo_import_dynamic libc_exit exit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1076,7 +1018,6 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) { func libc_faccessat_trampoline() -//go:linkname libc_faccessat libc_faccessat //go:cgo_import_dynamic libc_faccessat faccessat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1091,7 +1032,6 @@ func Fchdir(fd int) (err error) { func libc_fchdir_trampoline() -//go:linkname libc_fchdir 
libc_fchdir //go:cgo_import_dynamic libc_fchdir fchdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1106,7 +1046,6 @@ func Fchflags(fd int, flags int) (err error) { func libc_fchflags_trampoline() -//go:linkname libc_fchflags libc_fchflags //go:cgo_import_dynamic libc_fchflags fchflags "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1121,7 +1060,6 @@ func Fchmod(fd int, mode uint32) (err error) { func libc_fchmod_trampoline() -//go:linkname libc_fchmod libc_fchmod //go:cgo_import_dynamic libc_fchmod fchmod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1141,7 +1079,6 @@ func Fchmodat(dirfd int, path string, mode uint32, flags int) (err error) { func libc_fchmodat_trampoline() -//go:linkname libc_fchmodat libc_fchmodat //go:cgo_import_dynamic libc_fchmodat fchmodat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1156,7 +1093,6 @@ func Fchown(fd int, uid int, gid int) (err error) { func libc_fchown_trampoline() -//go:linkname libc_fchown libc_fchown //go:cgo_import_dynamic libc_fchown fchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1176,7 +1112,6 @@ func Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error) { func libc_fchownat_trampoline() -//go:linkname libc_fchownat libc_fchownat //go:cgo_import_dynamic libc_fchownat fchownat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1196,7 +1131,6 @@ func Fclonefileat(srcDirfd int, dstDirfd int, dst string, flags int) (err error) func libc_fclonefileat_trampoline() -//go:linkname libc_fclonefileat libc_fclonefileat //go:cgo_import_dynamic libc_fclonefileat fclonefileat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1211,7 +1145,6 @@ func Flock(fd int, how int) (err error) { func libc_flock_trampoline() -//go:linkname libc_flock libc_flock //go:cgo_import_dynamic libc_flock flock "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1227,7 +1160,6 @@ func Fpathconf(fd int, name int) (val int, err error) { func libc_fpathconf_trampoline() -//go:linkname libc_fpathconf libc_fpathconf //go:cgo_import_dynamic libc_fpathconf fpathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1242,7 +1174,6 @@ func Fsync(fd int) (err error) { func libc_fsync_trampoline() -//go:linkname libc_fsync libc_fsync //go:cgo_import_dynamic libc_fsync fsync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1257,7 +1188,6 @@ func Ftruncate(fd int, length int64) (err error) { func libc_ftruncate_trampoline() -//go:linkname libc_ftruncate libc_ftruncate //go:cgo_import_dynamic libc_ftruncate ftruncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1279,7 +1209,6 @@ func Getcwd(buf []byte) (n int, err error) { func libc_getcwd_trampoline() -//go:linkname libc_getcwd libc_getcwd //go:cgo_import_dynamic libc_getcwd getcwd "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1292,7 +1221,6 @@ func Getdtablesize() (size int) { func libc_getdtablesize_trampoline() -//go:linkname libc_getdtablesize libc_getdtablesize //go:cgo_import_dynamic libc_getdtablesize getdtablesize 
"/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1305,7 +1233,6 @@ func Getegid() (egid int) { func libc_getegid_trampoline() -//go:linkname libc_getegid libc_getegid //go:cgo_import_dynamic libc_getegid getegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1318,7 +1245,6 @@ func Geteuid() (uid int) { func libc_geteuid_trampoline() -//go:linkname libc_geteuid libc_geteuid //go:cgo_import_dynamic libc_geteuid geteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1331,7 +1257,6 @@ func Getgid() (gid int) { func libc_getgid_trampoline() -//go:linkname libc_getgid libc_getgid //go:cgo_import_dynamic libc_getgid getgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1347,7 +1272,6 @@ func Getpgid(pid int) (pgid int, err error) { func libc_getpgid_trampoline() -//go:linkname libc_getpgid libc_getpgid //go:cgo_import_dynamic libc_getpgid getpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1360,7 +1284,6 @@ func Getpgrp() (pgrp int) { func libc_getpgrp_trampoline() -//go:linkname libc_getpgrp libc_getpgrp //go:cgo_import_dynamic libc_getpgrp getpgrp "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1373,7 +1296,6 @@ func Getpid() (pid int) { func libc_getpid_trampoline() -//go:linkname libc_getpid libc_getpid //go:cgo_import_dynamic libc_getpid getpid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1386,7 +1308,6 @@ func Getppid() (ppid int) { func libc_getppid_trampoline() -//go:linkname libc_getppid libc_getppid //go:cgo_import_dynamic libc_getppid getppid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1402,7 +1323,6 @@ func Getpriority(which int, who int) (prio int, err error) { func libc_getpriority_trampoline() -//go:linkname libc_getpriority libc_getpriority //go:cgo_import_dynamic libc_getpriority getpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1417,7 +1337,6 @@ func Getrlimit(which int, lim *Rlimit) (err error) { func libc_getrlimit_trampoline() -//go:linkname libc_getrlimit libc_getrlimit //go:cgo_import_dynamic libc_getrlimit getrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1432,7 +1351,6 @@ func Getrusage(who int, rusage *Rusage) (err error) { func libc_getrusage_trampoline() -//go:linkname libc_getrusage libc_getrusage //go:cgo_import_dynamic libc_getrusage getrusage "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1448,7 +1366,6 @@ func Getsid(pid int) (sid int, err error) { func libc_getsid_trampoline() -//go:linkname libc_getsid libc_getsid //go:cgo_import_dynamic libc_getsid getsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1463,7 +1380,6 @@ func Gettimeofday(tp *Timeval) (err error) { func libc_gettimeofday_trampoline() -//go:linkname libc_gettimeofday libc_gettimeofday //go:cgo_import_dynamic libc_gettimeofday gettimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1476,7 +1392,6 @@ func Getuid() (uid int) { func libc_getuid_trampoline() -//go:linkname libc_getuid libc_getuid //go:cgo_import_dynamic 
libc_getuid getuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1489,7 +1404,6 @@ func Issetugid() (tainted bool) { func libc_issetugid_trampoline() -//go:linkname libc_issetugid libc_issetugid //go:cgo_import_dynamic libc_issetugid issetugid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1505,7 +1419,6 @@ func Kqueue() (fd int, err error) { func libc_kqueue_trampoline() -//go:linkname libc_kqueue libc_kqueue //go:cgo_import_dynamic libc_kqueue kqueue "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1525,7 +1438,6 @@ func Lchown(path string, uid int, gid int) (err error) { func libc_lchown_trampoline() -//go:linkname libc_lchown libc_lchown //go:cgo_import_dynamic libc_lchown lchown "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1550,7 +1462,6 @@ func Link(path string, link string) (err error) { func libc_link_trampoline() -//go:linkname libc_link libc_link //go:cgo_import_dynamic libc_link link "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1575,7 +1486,6 @@ func Linkat(pathfd int, path string, linkfd int, link string, flags int) (err er func libc_linkat_trampoline() -//go:linkname libc_linkat libc_linkat //go:cgo_import_dynamic libc_linkat linkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1590,7 +1500,6 @@ func Listen(s int, backlog int) (err error) { func libc_listen_trampoline() -//go:linkname libc_listen libc_listen //go:cgo_import_dynamic libc_listen listen "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1610,7 +1519,6 @@ func Mkdir(path string, mode uint32) (err error) { func libc_mkdir_trampoline() -//go:linkname libc_mkdir libc_mkdir //go:cgo_import_dynamic libc_mkdir mkdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1630,7 +1538,6 @@ func Mkdirat(dirfd int, path string, mode uint32) (err error) { func libc_mkdirat_trampoline() -//go:linkname libc_mkdirat libc_mkdirat //go:cgo_import_dynamic libc_mkdirat mkdirat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1650,7 +1557,6 @@ func Mkfifo(path string, mode uint32) (err error) { func libc_mkfifo_trampoline() -//go:linkname libc_mkfifo libc_mkfifo //go:cgo_import_dynamic libc_mkfifo mkfifo "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1670,7 +1576,6 @@ func Mknod(path string, mode uint32, dev int) (err error) { func libc_mknod_trampoline() -//go:linkname libc_mknod libc_mknod //go:cgo_import_dynamic libc_mknod mknod "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1691,7 +1596,6 @@ func Open(path string, mode int, perm uint32) (fd int, err error) { func libc_open_trampoline() -//go:linkname libc_open libc_open //go:cgo_import_dynamic libc_open open "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1712,7 +1616,6 @@ func Openat(dirfd int, path string, mode int, perm uint32) (fd int, err error) { func libc_openat_trampoline() -//go:linkname libc_openat libc_openat //go:cgo_import_dynamic libc_openat openat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1733,7 +1636,6 @@ func 
Pathconf(path string, name int) (val int, err error) { func libc_pathconf_trampoline() -//go:linkname libc_pathconf libc_pathconf //go:cgo_import_dynamic libc_pathconf pathconf "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1755,7 +1657,6 @@ func Pread(fd int, p []byte, offset int64) (n int, err error) { func libc_pread_trampoline() -//go:linkname libc_pread libc_pread //go:cgo_import_dynamic libc_pread pread "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1777,7 +1678,6 @@ func Pwrite(fd int, p []byte, offset int64) (n int, err error) { func libc_pwrite_trampoline() -//go:linkname libc_pwrite libc_pwrite //go:cgo_import_dynamic libc_pwrite pwrite "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1799,7 +1699,6 @@ func read(fd int, p []byte) (n int, err error) { func libc_read_trampoline() -//go:linkname libc_read libc_read //go:cgo_import_dynamic libc_read read "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1826,7 +1725,6 @@ func Readlink(path string, buf []byte) (n int, err error) { func libc_readlink_trampoline() -//go:linkname libc_readlink libc_readlink //go:cgo_import_dynamic libc_readlink readlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1853,7 +1751,6 @@ func Readlinkat(dirfd int, path string, buf []byte) (n int, err error) { func libc_readlinkat_trampoline() -//go:linkname libc_readlinkat libc_readlinkat //go:cgo_import_dynamic libc_readlinkat readlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1878,7 +1775,6 @@ func Rename(from string, to string) (err error) { func libc_rename_trampoline() -//go:linkname libc_rename libc_rename //go:cgo_import_dynamic libc_rename rename "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1903,7 +1799,6 @@ func Renameat(fromfd int, from string, tofd int, to string) (err error) { func libc_renameat_trampoline() -//go:linkname libc_renameat libc_renameat //go:cgo_import_dynamic libc_renameat renameat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1923,7 +1818,6 @@ func Revoke(path string) (err error) { func libc_revoke_trampoline() -//go:linkname libc_revoke libc_revoke //go:cgo_import_dynamic libc_revoke revoke "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1943,7 +1837,6 @@ func Rmdir(path string) (err error) { func libc_rmdir_trampoline() -//go:linkname libc_rmdir libc_rmdir //go:cgo_import_dynamic libc_rmdir rmdir "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1959,7 +1852,6 @@ func Seek(fd int, offset int64, whence int) (newoffset int64, err error) { func libc_lseek_trampoline() -//go:linkname libc_lseek libc_lseek //go:cgo_import_dynamic libc_lseek lseek "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1975,7 +1867,6 @@ func Select(nfd int, r *FdSet, w *FdSet, e *FdSet, timeout *Timeval) (n int, err func libc_select_trampoline() -//go:linkname libc_select libc_select //go:cgo_import_dynamic libc_select select "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -1990,7 +1881,6 @@ func Setegid(egid int) (err error) { func libc_setegid_trampoline() 
-//go:linkname libc_setegid libc_setegid //go:cgo_import_dynamic libc_setegid setegid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2005,7 +1895,6 @@ func Seteuid(euid int) (err error) { func libc_seteuid_trampoline() -//go:linkname libc_seteuid libc_seteuid //go:cgo_import_dynamic libc_seteuid seteuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2020,7 +1909,6 @@ func Setgid(gid int) (err error) { func libc_setgid_trampoline() -//go:linkname libc_setgid libc_setgid //go:cgo_import_dynamic libc_setgid setgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2040,7 +1928,6 @@ func Setlogin(name string) (err error) { func libc_setlogin_trampoline() -//go:linkname libc_setlogin libc_setlogin //go:cgo_import_dynamic libc_setlogin setlogin "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2055,7 +1942,6 @@ func Setpgid(pid int, pgid int) (err error) { func libc_setpgid_trampoline() -//go:linkname libc_setpgid libc_setpgid //go:cgo_import_dynamic libc_setpgid setpgid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2070,7 +1956,6 @@ func Setpriority(which int, who int, prio int) (err error) { func libc_setpriority_trampoline() -//go:linkname libc_setpriority libc_setpriority //go:cgo_import_dynamic libc_setpriority setpriority "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2085,7 +1970,6 @@ func Setprivexec(flag int) (err error) { func libc_setprivexec_trampoline() -//go:linkname libc_setprivexec libc_setprivexec //go:cgo_import_dynamic libc_setprivexec setprivexec "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2100,7 +1984,6 @@ func Setregid(rgid int, egid int) (err error) { func libc_setregid_trampoline() -//go:linkname libc_setregid libc_setregid //go:cgo_import_dynamic libc_setregid setregid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2115,7 +1998,6 @@ func Setreuid(ruid int, euid int) (err error) { func libc_setreuid_trampoline() -//go:linkname libc_setreuid libc_setreuid //go:cgo_import_dynamic libc_setreuid setreuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2130,7 +2012,6 @@ func Setrlimit(which int, lim *Rlimit) (err error) { func libc_setrlimit_trampoline() -//go:linkname libc_setrlimit libc_setrlimit //go:cgo_import_dynamic libc_setrlimit setrlimit "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2146,7 +2027,6 @@ func Setsid() (pid int, err error) { func libc_setsid_trampoline() -//go:linkname libc_setsid libc_setsid //go:cgo_import_dynamic libc_setsid setsid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2161,7 +2041,6 @@ func Settimeofday(tp *Timeval) (err error) { func libc_settimeofday_trampoline() -//go:linkname libc_settimeofday libc_settimeofday //go:cgo_import_dynamic libc_settimeofday settimeofday "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2176,7 +2055,6 @@ func Setuid(uid int) (err error) { func libc_setuid_trampoline() -//go:linkname libc_setuid libc_setuid //go:cgo_import_dynamic libc_setuid setuid "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND 
AT THE TOP; DO NOT EDIT @@ -2201,7 +2079,6 @@ func Symlink(path string, link string) (err error) { func libc_symlink_trampoline() -//go:linkname libc_symlink libc_symlink //go:cgo_import_dynamic libc_symlink symlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2226,7 +2103,6 @@ func Symlinkat(oldpath string, newdirfd int, newpath string) (err error) { func libc_symlinkat_trampoline() -//go:linkname libc_symlinkat libc_symlinkat //go:cgo_import_dynamic libc_symlinkat symlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2241,7 +2117,6 @@ func Sync() (err error) { func libc_sync_trampoline() -//go:linkname libc_sync libc_sync //go:cgo_import_dynamic libc_sync sync "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2261,7 +2136,6 @@ func Truncate(path string, length int64) (err error) { func libc_truncate_trampoline() -//go:linkname libc_truncate libc_truncate //go:cgo_import_dynamic libc_truncate truncate "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2274,7 +2148,6 @@ func Umask(newmask int) (oldmask int) { func libc_umask_trampoline() -//go:linkname libc_umask libc_umask //go:cgo_import_dynamic libc_umask umask "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2294,7 +2167,6 @@ func Undelete(path string) (err error) { func libc_undelete_trampoline() -//go:linkname libc_undelete libc_undelete //go:cgo_import_dynamic libc_undelete undelete "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2314,7 +2186,6 @@ func Unlink(path string) (err error) { func libc_unlink_trampoline() -//go:linkname libc_unlink libc_unlink //go:cgo_import_dynamic libc_unlink unlink "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2334,7 +2205,6 @@ func Unlinkat(dirfd int, path string, flags int) (err error) { func libc_unlinkat_trampoline() -//go:linkname libc_unlinkat libc_unlinkat //go:cgo_import_dynamic libc_unlinkat unlinkat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2354,7 +2224,6 @@ func Unmount(path string, flags int) (err error) { func libc_unmount_trampoline() -//go:linkname libc_unmount libc_unmount //go:cgo_import_dynamic libc_unmount unmount "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2376,7 +2245,6 @@ func write(fd int, p []byte) (n int, err error) { func libc_write_trampoline() -//go:linkname libc_write libc_write //go:cgo_import_dynamic libc_write write "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2392,7 +2260,6 @@ func mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) ( func libc_mmap_trampoline() -//go:linkname libc_mmap libc_mmap //go:cgo_import_dynamic libc_mmap mmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2407,7 +2274,6 @@ func munmap(addr uintptr, length uintptr) (err error) { func libc_munmap_trampoline() -//go:linkname libc_munmap libc_munmap //go:cgo_import_dynamic libc_munmap munmap "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2444,7 +2310,6 @@ func Fstat(fd int, stat *Stat_t) (err error) { func libc_fstat_trampoline() -//go:linkname libc_fstat libc_fstat 
//go:cgo_import_dynamic libc_fstat fstat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2464,7 +2329,6 @@ func Fstatat(fd int, path string, stat *Stat_t, flags int) (err error) { func libc_fstatat_trampoline() -//go:linkname libc_fstatat libc_fstatat //go:cgo_import_dynamic libc_fstatat fstatat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2479,7 +2343,6 @@ func Fstatfs(fd int, stat *Statfs_t) (err error) { func libc_fstatfs_trampoline() -//go:linkname libc_fstatfs libc_fstatfs //go:cgo_import_dynamic libc_fstatfs fstatfs "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2495,7 +2358,6 @@ func getfsstat(buf unsafe.Pointer, size uintptr, flags int) (n int, err error) { func libc_getfsstat_trampoline() -//go:linkname libc_getfsstat libc_getfsstat //go:cgo_import_dynamic libc_getfsstat getfsstat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2515,12 +2377,11 @@ func Lstat(path string, stat *Stat_t) (err error) { func libc_lstat_trampoline() -//go:linkname libc_lstat libc_lstat //go:cgo_import_dynamic libc_lstat lstat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT -func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { +func ptrace1(request int, pid int, addr uintptr, data uintptr) (err error) { _, _, e1 := syscall_syscall6(funcPC(libc_ptrace_trampoline), uintptr(request), uintptr(pid), uintptr(addr), uintptr(data), 0, 0) if e1 != 0 { err = errnoErr(e1) @@ -2530,7 +2391,6 @@ func ptrace(request int, pid int, addr uintptr, data uintptr) (err error) { func libc_ptrace_trampoline() -//go:linkname libc_ptrace libc_ptrace //go:cgo_import_dynamic libc_ptrace ptrace "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2550,7 +2410,6 @@ func Stat(path string, stat *Stat_t) (err error) { func libc_stat_trampoline() -//go:linkname libc_stat libc_stat //go:cgo_import_dynamic libc_stat stat "/usr/lib/libSystem.B.dylib" // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT @@ -2570,5 +2429,4 @@ func Statfs(path string, stat *Statfs_t) (err error) { func libc_statfs_trampoline() -//go:linkname libc_statfs libc_statfs //go:cgo_import_dynamic libc_statfs statfs "/usr/lib/libSystem.B.dylib" diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go index 0f5a3f6970a..f6742bdee09 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go @@ -435,4 +435,5 @@ const ( SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go index 36d5219ef82..f7e525573bf 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go @@ -357,4 +357,5 @@ const ( SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go index 3622ba14b4e..3f60977da67 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go @@ -399,4 +399,5 @@ const ( SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 
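
The zsyscall_darwin hunks above all make the same mechanical edit: every wrapper keeps its //go:cgo_import_dynamic directive and assembly trampoline, and only the //go:linkname line is dropped (each one was an apparently redundant self-alias, libc_X to libc_X, so symbol resolution is unchanged). The one substantive change in this file is the rename of ptrace to ptrace1, presumably so a guarded ptrace wrapper elsewhere in the package can sit in front of the raw call. A minimal sketch of the surviving wrapper pattern, reusing the package-internal funcPC, syscall_syscall, and errnoErr helpers that these generated files already rely on:

	//go:cgo_import_dynamic libc_close close "/usr/lib/libSystem.B.dylib"

	func libc_close_trampoline()

	// Close jumps through the dynamically resolved trampoline and converts a
	// non-zero errno into a Go error, like every other wrapper in this file.
	func Close(fd int) (err error) {
		_, _, e1 := syscall_syscall(funcPC(libc_close_trampoline), uintptr(fd), 0, 0)
		if e1 != 0 {
			err = errnoErr(e1)
		}
		return
	}
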
SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go index 6193c3dc07c..dbedf4cbacc 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go @@ -302,4 +302,5 @@ const ( SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go index 640b974345f..eeff7e1dc93 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go @@ -420,4 +420,5 @@ const ( SYS_OPENAT2 = 4437 SYS_PIDFD_GETFD = 4438 SYS_FACCESSAT2 = 4439 + SYS_PROCESS_MADVISE = 4440 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go index 3467fbb5ff1..73cfa535cd6 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go @@ -350,4 +350,5 @@ const ( SYS_OPENAT2 = 5437 SYS_PIDFD_GETFD = 5438 SYS_FACCESSAT2 = 5439 + SYS_PROCESS_MADVISE = 5440 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go index 0fc38d5a72f..be74729e0cb 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go @@ -350,4 +350,5 @@ const ( SYS_OPENAT2 = 5437 SYS_PIDFD_GETFD = 5438 SYS_FACCESSAT2 = 5439 + SYS_PROCESS_MADVISE = 5440 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go index 999fd55bccb..2a1047c818c 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go @@ -420,4 +420,5 @@ const ( SYS_OPENAT2 = 4437 SYS_PIDFD_GETFD = 4438 SYS_FACCESSAT2 = 4439 + SYS_PROCESS_MADVISE = 4440 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go index 1df0d799355..32707428ce2 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go @@ -399,4 +399,5 @@ const ( SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go index 4db39cca4da..a58572f7810 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go @@ -399,4 +399,5 @@ const ( SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go index e6927401446..72a65b76026 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go @@ -301,4 +301,5 @@ const ( SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go index a585aec4e79..1fb9ae5d493 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go @@ -364,4 +364,5 @@ const ( SYS_OPENAT2 = 437 
SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 ) diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go index d047e567afc..57636e09e41 100644 --- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go @@ -378,4 +378,5 @@ const ( SYS_OPENAT2 = 437 SYS_PIDFD_GETFD = 438 SYS_FACCESSAT2 = 439 + SYS_PROCESS_MADVISE = 440 ) diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go index 2c1f815e6f9..295859c503d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc.go @@ -219,6 +219,7 @@ const ( SizeofSockaddrUnix = 0x401 SizeofSockaddrDatalink = 0x80 SizeofLinger = 0x8 + SizeofIovec = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofIPv6MTUInfo = 0x20 diff --git a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go index b4a069ecbdf..a9ee0ffd44c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_aix_ppc64.go @@ -223,6 +223,7 @@ const ( SizeofSockaddrUnix = 0x401 SizeofSockaddrDatalink = 0x80 SizeofLinger = 0x8 + SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofIPv6MTUInfo = 0x20 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go index 830fbb35c0a..725b4bee27d 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go @@ -269,6 +269,7 @@ const ( SizeofSockaddrDatalink = 0x14 SizeofSockaddrCtl = 0x20 SizeofLinger = 0x8 + SizeofIovec = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x1c diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go index e53a7c49ffe..080ffce3255 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go @@ -274,6 +274,7 @@ const ( SizeofSockaddrDatalink = 0x14 SizeofSockaddrCtl = 0x20 SizeofLinger = 0x8 + SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x30 diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go index 98be973ef94..f2a77bc4e28 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go @@ -269,6 +269,7 @@ const ( SizeofSockaddrDatalink = 0x14 SizeofSockaddrCtl = 0x20 SizeofLinger = 0x8 + SizeofIovec = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x1c diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go index ddae5afe1ba..c9492428bfa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go @@ -274,6 +274,7 @@ const ( SizeofSockaddrDatalink = 0x14 SizeofSockaddrCtl = 0x20 SizeofLinger = 0x8 + SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x30 diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go index c4772df23bf..85506a05d4b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go @@ -234,6 +234,7 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 
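
The zsysnum_linux hunks here register syscall number 440 (4440/5440 on the MIPS ABIs) for process_madvise, new in Linux 5.10. The patch adds only the number, not a typed wrapper, so reaching the syscall goes through the raw interface. A hedged sketch follows; processMadvise is an illustrative helper, not part of x/sys, assuming the documented signature (pidfd, iovec array, advice, flags):

	package main

	import (
		"unsafe"

		"golang.org/x/sys/unix"
	)

	// processMadvise issues process_madvise(2): apply madvise-style advice to
	// another process's memory ranges, addressed by pidfd. Linux 5.10+ only.
	func processMadvise(pidfd int, iov []unix.Iovec, advice int) error {
		if len(iov) == 0 {
			return nil
		}
		_, _, errno := unix.Syscall6(unix.SYS_PROCESS_MADVISE,
			uintptr(pidfd),
			uintptr(unsafe.Pointer(&iov[0])),
			uintptr(len(iov)),
			uintptr(advice),
			0, 0) // flags must currently be zero
		if errno != 0 {
			return errno
		}
		return nil
	}

	func main() {
		// Self-targeted smoke test: open a pidfd for this process via the raw
		// pidfd_open syscall (number 434, already present in these files).
		fd, _, errno := unix.Syscall(unix.SYS_PIDFD_OPEN, uintptr(unix.Getpid()), 0, 0)
		if errno != 0 {
			panic(errno)
		}
		defer unix.Close(int(fd))

		buf := make([]byte, 4096)
		var iov unix.Iovec
		iov.Base = &buf[0]
		iov.SetLen(len(buf))
		// MADV_COLD asks the kernel to deactivate the pages when convenient.
		if err := processMadvise(int(fd), []unix.Iovec{iov}, unix.MADV_COLD); err != nil {
			panic(err)
		}
	}
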
SizeofLinger = 0x8 + SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x30 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go index 2a3ec615f75..3e9dad33e33 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go @@ -313,6 +313,7 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 SizeofLinger = 0x8 + SizeofIovec = 0x8 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go index e11e95499e8..e00e615544c 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go @@ -309,6 +309,7 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 SizeofLinger = 0x8 + SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go index b91c2ae0f01..5da13c871b5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go @@ -311,6 +311,7 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 SizeofLinger = 0x8 + SizeofIovec = 0x8 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go index c6fe1d097d8..995ecf9d4e2 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm64.go @@ -309,6 +309,7 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x36 SizeofLinger = 0x8 + SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPMreqn = 0xc SizeofIPv6Mreq = 0x14 diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go index 504ef131fb8..9f73d669bd4 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go @@ -1381,6 +1381,11 @@ const ( IFLA_PROP_LIST = 0x34 IFLA_ALT_IFNAME = 0x35 IFLA_PERM_ADDRESS = 0x36 + IFLA_PROTO_DOWN_REASON = 0x37 + IFLA_PROTO_DOWN_REASON_UNSPEC = 0x0 + IFLA_PROTO_DOWN_REASON_MASK = 0x1 + IFLA_PROTO_DOWN_REASON_VALUE = 0x2 + IFLA_PROTO_DOWN_REASON_MAX = 0x2 IFLA_INET_UNSPEC = 0x0 IFLA_INET_CONF = 0x1 IFLA_INET6_UNSPEC = 0x0 @@ -1475,6 +1480,7 @@ const ( IFLA_BRPORT_ISOLATED = 0x21 IFLA_BRPORT_BACKUP_PORT = 0x22 IFLA_BRPORT_MRP_RING_OPEN = 0x23 + IFLA_BRPORT_MRP_IN_OPEN = 0x24 IFLA_INFO_UNSPEC = 0x0 IFLA_INFO_KIND = 0x1 IFLA_INFO_DATA = 0x2 @@ -1673,6 +1679,7 @@ const ( IFLA_HSR_SUPERVISION_ADDR = 0x4 IFLA_HSR_SEQ_NR = 0x5 IFLA_HSR_VERSION = 0x6 + IFLA_HSR_PROTOCOL = 0x7 IFLA_STATS_UNSPEC = 0x0 IFLA_STATS_LINK_64 = 0x1 IFLA_STATS_LINK_XSTATS = 0x2 @@ -2217,10 +2224,12 @@ const ( ) const ( - NETNSA_NONE = 0x0 - NETNSA_NSID = 0x1 - NETNSA_PID = 0x2 - NETNSA_FD = 0x3 + NETNSA_NONE = 0x0 + NETNSA_NSID = 0x1 + NETNSA_PID = 0x2 + NETNSA_FD = 0x3 + NETNSA_TARGET_NSID = 0x4 + NETNSA_CURRENT_NSID = 0x5 ) type XDPRingOffset struct { @@ -2370,281 +2379,309 @@ const ( ) const ( - BPF_REG_0 = 0x0 - BPF_REG_1 = 0x1 - BPF_REG_2 = 0x2 - BPF_REG_3 = 0x3 - BPF_REG_4 = 0x4 - BPF_REG_5 = 0x5 - BPF_REG_6 = 0x6 - BPF_REG_7 = 0x7 - BPF_REG_8 = 0x8 - BPF_REG_9 = 0x9 - BPF_REG_10 = 0xa - BPF_MAP_CREATE = 0x0 - BPF_MAP_LOOKUP_ELEM = 0x1 - BPF_MAP_UPDATE_ELEM = 
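
The ztypes hunks in this stretch add a per-platform SizeofIovec constant: 0x8 on 32-bit targets (386, arm, aix-ppc) and 0x10 on 64-bit ones, i.e. exactly two machine words for Iovec's {Base pointer, Len integer} pair. A quick illustrative check on any platform that carries the constant:

	package main

	import (
		"fmt"
		"unsafe"

		"golang.org/x/sys/unix"
	)

	func main() {
		// Two words: 8 bytes on 32-bit targets, 16 on 64-bit ones — matching
		// the constants added in this patch.
		fmt.Println(unsafe.Sizeof(unix.Iovec{}))                              // e.g. 16 on linux/amd64
		fmt.Println(uintptr(unix.SizeofIovec) == unsafe.Sizeof(unix.Iovec{})) // true
	}
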
0x2 - BPF_MAP_DELETE_ELEM = 0x3 - BPF_MAP_GET_NEXT_KEY = 0x4 - BPF_PROG_LOAD = 0x5 - BPF_OBJ_PIN = 0x6 - BPF_OBJ_GET = 0x7 - BPF_PROG_ATTACH = 0x8 - BPF_PROG_DETACH = 0x9 - BPF_PROG_TEST_RUN = 0xa - BPF_PROG_GET_NEXT_ID = 0xb - BPF_MAP_GET_NEXT_ID = 0xc - BPF_PROG_GET_FD_BY_ID = 0xd - BPF_MAP_GET_FD_BY_ID = 0xe - BPF_OBJ_GET_INFO_BY_FD = 0xf - BPF_PROG_QUERY = 0x10 - BPF_RAW_TRACEPOINT_OPEN = 0x11 - BPF_BTF_LOAD = 0x12 - BPF_BTF_GET_FD_BY_ID = 0x13 - BPF_TASK_FD_QUERY = 0x14 - BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 - BPF_MAP_FREEZE = 0x16 - BPF_BTF_GET_NEXT_ID = 0x17 - BPF_MAP_LOOKUP_BATCH = 0x18 - BPF_MAP_LOOKUP_AND_DELETE_BATCH = 0x19 - BPF_MAP_UPDATE_BATCH = 0x1a - BPF_MAP_DELETE_BATCH = 0x1b - BPF_LINK_CREATE = 0x1c - BPF_LINK_UPDATE = 0x1d - BPF_LINK_GET_FD_BY_ID = 0x1e - BPF_LINK_GET_NEXT_ID = 0x1f - BPF_ENABLE_STATS = 0x20 - BPF_ITER_CREATE = 0x21 - BPF_MAP_TYPE_UNSPEC = 0x0 - BPF_MAP_TYPE_HASH = 0x1 - BPF_MAP_TYPE_ARRAY = 0x2 - BPF_MAP_TYPE_PROG_ARRAY = 0x3 - BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 - BPF_MAP_TYPE_PERCPU_HASH = 0x5 - BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 - BPF_MAP_TYPE_STACK_TRACE = 0x7 - BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 - BPF_MAP_TYPE_LRU_HASH = 0x9 - BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa - BPF_MAP_TYPE_LPM_TRIE = 0xb - BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc - BPF_MAP_TYPE_HASH_OF_MAPS = 0xd - BPF_MAP_TYPE_DEVMAP = 0xe - BPF_MAP_TYPE_SOCKMAP = 0xf - BPF_MAP_TYPE_CPUMAP = 0x10 - BPF_MAP_TYPE_XSKMAP = 0x11 - BPF_MAP_TYPE_SOCKHASH = 0x12 - BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 - BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 - BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 - BPF_MAP_TYPE_QUEUE = 0x16 - BPF_MAP_TYPE_STACK = 0x17 - BPF_MAP_TYPE_SK_STORAGE = 0x18 - BPF_MAP_TYPE_DEVMAP_HASH = 0x19 - BPF_MAP_TYPE_STRUCT_OPS = 0x1a - BPF_MAP_TYPE_RINGBUF = 0x1b - BPF_PROG_TYPE_UNSPEC = 0x0 - BPF_PROG_TYPE_SOCKET_FILTER = 0x1 - BPF_PROG_TYPE_KPROBE = 0x2 - BPF_PROG_TYPE_SCHED_CLS = 0x3 - BPF_PROG_TYPE_SCHED_ACT = 0x4 - BPF_PROG_TYPE_TRACEPOINT = 0x5 - BPF_PROG_TYPE_XDP = 0x6 - BPF_PROG_TYPE_PERF_EVENT = 0x7 - BPF_PROG_TYPE_CGROUP_SKB = 0x8 - BPF_PROG_TYPE_CGROUP_SOCK = 0x9 - BPF_PROG_TYPE_LWT_IN = 0xa - BPF_PROG_TYPE_LWT_OUT = 0xb - BPF_PROG_TYPE_LWT_XMIT = 0xc - BPF_PROG_TYPE_SOCK_OPS = 0xd - BPF_PROG_TYPE_SK_SKB = 0xe - BPF_PROG_TYPE_CGROUP_DEVICE = 0xf - BPF_PROG_TYPE_SK_MSG = 0x10 - BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 - BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 - BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 - BPF_PROG_TYPE_LIRC_MODE2 = 0x14 - BPF_PROG_TYPE_SK_REUSEPORT = 0x15 - BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 - BPF_PROG_TYPE_CGROUP_SYSCTL = 0x17 - BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 0x18 - BPF_PROG_TYPE_CGROUP_SOCKOPT = 0x19 - BPF_PROG_TYPE_TRACING = 0x1a - BPF_PROG_TYPE_STRUCT_OPS = 0x1b - BPF_PROG_TYPE_EXT = 0x1c - BPF_PROG_TYPE_LSM = 0x1d - BPF_CGROUP_INET_INGRESS = 0x0 - BPF_CGROUP_INET_EGRESS = 0x1 - BPF_CGROUP_INET_SOCK_CREATE = 0x2 - BPF_CGROUP_SOCK_OPS = 0x3 - BPF_SK_SKB_STREAM_PARSER = 0x4 - BPF_SK_SKB_STREAM_VERDICT = 0x5 - BPF_CGROUP_DEVICE = 0x6 - BPF_SK_MSG_VERDICT = 0x7 - BPF_CGROUP_INET4_BIND = 0x8 - BPF_CGROUP_INET6_BIND = 0x9 - BPF_CGROUP_INET4_CONNECT = 0xa - BPF_CGROUP_INET6_CONNECT = 0xb - BPF_CGROUP_INET4_POST_BIND = 0xc - BPF_CGROUP_INET6_POST_BIND = 0xd - BPF_CGROUP_UDP4_SENDMSG = 0xe - BPF_CGROUP_UDP6_SENDMSG = 0xf - BPF_LIRC_MODE2 = 0x10 - BPF_FLOW_DISSECTOR = 0x11 - BPF_CGROUP_SYSCTL = 0x12 - BPF_CGROUP_UDP4_RECVMSG = 0x13 - BPF_CGROUP_UDP6_RECVMSG = 0x14 - BPF_CGROUP_GETSOCKOPT = 0x15 - BPF_CGROUP_SETSOCKOPT = 0x16 - BPF_TRACE_RAW_TP = 0x17 - BPF_TRACE_FENTRY = 0x18 - BPF_TRACE_FEXIT = 
0x19 - BPF_MODIFY_RETURN = 0x1a - BPF_LSM_MAC = 0x1b - BPF_TRACE_ITER = 0x1c - BPF_CGROUP_INET4_GETPEERNAME = 0x1d - BPF_CGROUP_INET6_GETPEERNAME = 0x1e - BPF_CGROUP_INET4_GETSOCKNAME = 0x1f - BPF_CGROUP_INET6_GETSOCKNAME = 0x20 - BPF_XDP_DEVMAP = 0x21 - BPF_LINK_TYPE_UNSPEC = 0x0 - BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 - BPF_LINK_TYPE_TRACING = 0x2 - BPF_LINK_TYPE_CGROUP = 0x3 - BPF_LINK_TYPE_ITER = 0x4 - BPF_LINK_TYPE_NETNS = 0x5 - BPF_ANY = 0x0 - BPF_NOEXIST = 0x1 - BPF_EXIST = 0x2 - BPF_F_LOCK = 0x4 - BPF_F_NO_PREALLOC = 0x1 - BPF_F_NO_COMMON_LRU = 0x2 - BPF_F_NUMA_NODE = 0x4 - BPF_F_RDONLY = 0x8 - BPF_F_WRONLY = 0x10 - BPF_F_STACK_BUILD_ID = 0x20 - BPF_F_ZERO_SEED = 0x40 - BPF_F_RDONLY_PROG = 0x80 - BPF_F_WRONLY_PROG = 0x100 - BPF_F_CLONE = 0x200 - BPF_F_MMAPABLE = 0x400 - BPF_STATS_RUN_TIME = 0x0 - BPF_STACK_BUILD_ID_EMPTY = 0x0 - BPF_STACK_BUILD_ID_VALID = 0x1 - BPF_STACK_BUILD_ID_IP = 0x2 - BPF_F_RECOMPUTE_CSUM = 0x1 - BPF_F_INVALIDATE_HASH = 0x2 - BPF_F_HDR_FIELD_MASK = 0xf - BPF_F_PSEUDO_HDR = 0x10 - BPF_F_MARK_MANGLED_0 = 0x20 - BPF_F_MARK_ENFORCE = 0x40 - BPF_F_INGRESS = 0x1 - BPF_F_TUNINFO_IPV6 = 0x1 - BPF_F_SKIP_FIELD_MASK = 0xff - BPF_F_USER_STACK = 0x100 - BPF_F_FAST_STACK_CMP = 0x200 - BPF_F_REUSE_STACKID = 0x400 - BPF_F_USER_BUILD_ID = 0x800 - BPF_F_ZERO_CSUM_TX = 0x2 - BPF_F_DONT_FRAGMENT = 0x4 - BPF_F_SEQ_NUMBER = 0x8 - BPF_F_INDEX_MASK = 0xffffffff - BPF_F_CURRENT_CPU = 0xffffffff - BPF_F_CTXLEN_MASK = 0xfffff00000000 - BPF_F_CURRENT_NETNS = -0x1 - BPF_CSUM_LEVEL_QUERY = 0x0 - BPF_CSUM_LEVEL_INC = 0x1 - BPF_CSUM_LEVEL_DEC = 0x2 - BPF_CSUM_LEVEL_RESET = 0x3 - BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 - BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 - BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 - BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 - BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 - BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff - BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 - BPF_F_SYSCTL_BASE_NAME = 0x1 - BPF_SK_STORAGE_GET_F_CREATE = 0x1 - BPF_F_GET_BRANCH_RECORDS_SIZE = 0x1 - BPF_RB_NO_WAKEUP = 0x1 - BPF_RB_FORCE_WAKEUP = 0x2 - BPF_RB_AVAIL_DATA = 0x0 - BPF_RB_RING_SIZE = 0x1 - BPF_RB_CONS_POS = 0x2 - BPF_RB_PROD_POS = 0x3 - BPF_RINGBUF_BUSY_BIT = 0x80000000 - BPF_RINGBUF_DISCARD_BIT = 0x40000000 - BPF_RINGBUF_HDR_SZ = 0x8 - BPF_ADJ_ROOM_NET = 0x0 - BPF_ADJ_ROOM_MAC = 0x1 - BPF_HDR_START_MAC = 0x0 - BPF_HDR_START_NET = 0x1 - BPF_LWT_ENCAP_SEG6 = 0x0 - BPF_LWT_ENCAP_SEG6_INLINE = 0x1 - BPF_LWT_ENCAP_IP = 0x2 - BPF_OK = 0x0 - BPF_DROP = 0x2 - BPF_REDIRECT = 0x7 - BPF_LWT_REROUTE = 0x80 - BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 - BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 - BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 - BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 - BPF_SOCK_OPS_ALL_CB_FLAGS = 0xf - BPF_SOCK_OPS_VOID = 0x0 - BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 - BPF_SOCK_OPS_RWND_INIT = 0x2 - BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 - BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 - BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 - BPF_SOCK_OPS_NEEDS_ECN = 0x6 - BPF_SOCK_OPS_BASE_RTT = 0x7 - BPF_SOCK_OPS_RTO_CB = 0x8 - BPF_SOCK_OPS_RETRANS_CB = 0x9 - BPF_SOCK_OPS_STATE_CB = 0xa - BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb - BPF_SOCK_OPS_RTT_CB = 0xc - BPF_TCP_ESTABLISHED = 0x1 - BPF_TCP_SYN_SENT = 0x2 - BPF_TCP_SYN_RECV = 0x3 - BPF_TCP_FIN_WAIT1 = 0x4 - BPF_TCP_FIN_WAIT2 = 0x5 - BPF_TCP_TIME_WAIT = 0x6 - BPF_TCP_CLOSE = 0x7 - BPF_TCP_CLOSE_WAIT = 0x8 - BPF_TCP_LAST_ACK = 0x9 - BPF_TCP_LISTEN = 0xa - BPF_TCP_CLOSING = 0xb - BPF_TCP_NEW_SYN_RECV = 0xc - BPF_TCP_MAX_STATES = 0xd - TCP_BPF_IW = 0x3e9 - TCP_BPF_SNDCWND_CLAMP = 0x3ea - BPF_DEVCG_ACC_MKNOD = 0x1 - BPF_DEVCG_ACC_READ = 
0x2 - BPF_DEVCG_ACC_WRITE = 0x4 - BPF_DEVCG_DEV_BLOCK = 0x1 - BPF_DEVCG_DEV_CHAR = 0x2 - BPF_FIB_LOOKUP_DIRECT = 0x1 - BPF_FIB_LOOKUP_OUTPUT = 0x2 - BPF_FIB_LKUP_RET_SUCCESS = 0x0 - BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 - BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 - BPF_FIB_LKUP_RET_PROHIBIT = 0x3 - BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 - BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 - BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 - BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 - BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 - BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 - BPF_FD_TYPE_TRACEPOINT = 0x1 - BPF_FD_TYPE_KPROBE = 0x2 - BPF_FD_TYPE_KRETPROBE = 0x3 - BPF_FD_TYPE_UPROBE = 0x4 - BPF_FD_TYPE_URETPROBE = 0x5 - BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1 - BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2 - BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4 + BPF_REG_0 = 0x0 + BPF_REG_1 = 0x1 + BPF_REG_2 = 0x2 + BPF_REG_3 = 0x3 + BPF_REG_4 = 0x4 + BPF_REG_5 = 0x5 + BPF_REG_6 = 0x6 + BPF_REG_7 = 0x7 + BPF_REG_8 = 0x8 + BPF_REG_9 = 0x9 + BPF_REG_10 = 0xa + BPF_MAP_CREATE = 0x0 + BPF_MAP_LOOKUP_ELEM = 0x1 + BPF_MAP_UPDATE_ELEM = 0x2 + BPF_MAP_DELETE_ELEM = 0x3 + BPF_MAP_GET_NEXT_KEY = 0x4 + BPF_PROG_LOAD = 0x5 + BPF_OBJ_PIN = 0x6 + BPF_OBJ_GET = 0x7 + BPF_PROG_ATTACH = 0x8 + BPF_PROG_DETACH = 0x9 + BPF_PROG_TEST_RUN = 0xa + BPF_PROG_GET_NEXT_ID = 0xb + BPF_MAP_GET_NEXT_ID = 0xc + BPF_PROG_GET_FD_BY_ID = 0xd + BPF_MAP_GET_FD_BY_ID = 0xe + BPF_OBJ_GET_INFO_BY_FD = 0xf + BPF_PROG_QUERY = 0x10 + BPF_RAW_TRACEPOINT_OPEN = 0x11 + BPF_BTF_LOAD = 0x12 + BPF_BTF_GET_FD_BY_ID = 0x13 + BPF_TASK_FD_QUERY = 0x14 + BPF_MAP_LOOKUP_AND_DELETE_ELEM = 0x15 + BPF_MAP_FREEZE = 0x16 + BPF_BTF_GET_NEXT_ID = 0x17 + BPF_MAP_LOOKUP_BATCH = 0x18 + BPF_MAP_LOOKUP_AND_DELETE_BATCH = 0x19 + BPF_MAP_UPDATE_BATCH = 0x1a + BPF_MAP_DELETE_BATCH = 0x1b + BPF_LINK_CREATE = 0x1c + BPF_LINK_UPDATE = 0x1d + BPF_LINK_GET_FD_BY_ID = 0x1e + BPF_LINK_GET_NEXT_ID = 0x1f + BPF_ENABLE_STATS = 0x20 + BPF_ITER_CREATE = 0x21 + BPF_LINK_DETACH = 0x22 + BPF_PROG_BIND_MAP = 0x23 + BPF_MAP_TYPE_UNSPEC = 0x0 + BPF_MAP_TYPE_HASH = 0x1 + BPF_MAP_TYPE_ARRAY = 0x2 + BPF_MAP_TYPE_PROG_ARRAY = 0x3 + BPF_MAP_TYPE_PERF_EVENT_ARRAY = 0x4 + BPF_MAP_TYPE_PERCPU_HASH = 0x5 + BPF_MAP_TYPE_PERCPU_ARRAY = 0x6 + BPF_MAP_TYPE_STACK_TRACE = 0x7 + BPF_MAP_TYPE_CGROUP_ARRAY = 0x8 + BPF_MAP_TYPE_LRU_HASH = 0x9 + BPF_MAP_TYPE_LRU_PERCPU_HASH = 0xa + BPF_MAP_TYPE_LPM_TRIE = 0xb + BPF_MAP_TYPE_ARRAY_OF_MAPS = 0xc + BPF_MAP_TYPE_HASH_OF_MAPS = 0xd + BPF_MAP_TYPE_DEVMAP = 0xe + BPF_MAP_TYPE_SOCKMAP = 0xf + BPF_MAP_TYPE_CPUMAP = 0x10 + BPF_MAP_TYPE_XSKMAP = 0x11 + BPF_MAP_TYPE_SOCKHASH = 0x12 + BPF_MAP_TYPE_CGROUP_STORAGE = 0x13 + BPF_MAP_TYPE_REUSEPORT_SOCKARRAY = 0x14 + BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE = 0x15 + BPF_MAP_TYPE_QUEUE = 0x16 + BPF_MAP_TYPE_STACK = 0x17 + BPF_MAP_TYPE_SK_STORAGE = 0x18 + BPF_MAP_TYPE_DEVMAP_HASH = 0x19 + BPF_MAP_TYPE_STRUCT_OPS = 0x1a + BPF_MAP_TYPE_RINGBUF = 0x1b + BPF_MAP_TYPE_INODE_STORAGE = 0x1c + BPF_PROG_TYPE_UNSPEC = 0x0 + BPF_PROG_TYPE_SOCKET_FILTER = 0x1 + BPF_PROG_TYPE_KPROBE = 0x2 + BPF_PROG_TYPE_SCHED_CLS = 0x3 + BPF_PROG_TYPE_SCHED_ACT = 0x4 + BPF_PROG_TYPE_TRACEPOINT = 0x5 + BPF_PROG_TYPE_XDP = 0x6 + BPF_PROG_TYPE_PERF_EVENT = 0x7 + BPF_PROG_TYPE_CGROUP_SKB = 0x8 + BPF_PROG_TYPE_CGROUP_SOCK = 0x9 + BPF_PROG_TYPE_LWT_IN = 0xa + BPF_PROG_TYPE_LWT_OUT = 0xb + BPF_PROG_TYPE_LWT_XMIT = 0xc + BPF_PROG_TYPE_SOCK_OPS = 0xd + BPF_PROG_TYPE_SK_SKB = 0xe + BPF_PROG_TYPE_CGROUP_DEVICE = 0xf + BPF_PROG_TYPE_SK_MSG = 0x10 + BPF_PROG_TYPE_RAW_TRACEPOINT = 0x11 + BPF_PROG_TYPE_CGROUP_SOCK_ADDR = 0x12 + 
BPF_PROG_TYPE_LWT_SEG6LOCAL = 0x13 + BPF_PROG_TYPE_LIRC_MODE2 = 0x14 + BPF_PROG_TYPE_SK_REUSEPORT = 0x15 + BPF_PROG_TYPE_FLOW_DISSECTOR = 0x16 + BPF_PROG_TYPE_CGROUP_SYSCTL = 0x17 + BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE = 0x18 + BPF_PROG_TYPE_CGROUP_SOCKOPT = 0x19 + BPF_PROG_TYPE_TRACING = 0x1a + BPF_PROG_TYPE_STRUCT_OPS = 0x1b + BPF_PROG_TYPE_EXT = 0x1c + BPF_PROG_TYPE_LSM = 0x1d + BPF_PROG_TYPE_SK_LOOKUP = 0x1e + BPF_CGROUP_INET_INGRESS = 0x0 + BPF_CGROUP_INET_EGRESS = 0x1 + BPF_CGROUP_INET_SOCK_CREATE = 0x2 + BPF_CGROUP_SOCK_OPS = 0x3 + BPF_SK_SKB_STREAM_PARSER = 0x4 + BPF_SK_SKB_STREAM_VERDICT = 0x5 + BPF_CGROUP_DEVICE = 0x6 + BPF_SK_MSG_VERDICT = 0x7 + BPF_CGROUP_INET4_BIND = 0x8 + BPF_CGROUP_INET6_BIND = 0x9 + BPF_CGROUP_INET4_CONNECT = 0xa + BPF_CGROUP_INET6_CONNECT = 0xb + BPF_CGROUP_INET4_POST_BIND = 0xc + BPF_CGROUP_INET6_POST_BIND = 0xd + BPF_CGROUP_UDP4_SENDMSG = 0xe + BPF_CGROUP_UDP6_SENDMSG = 0xf + BPF_LIRC_MODE2 = 0x10 + BPF_FLOW_DISSECTOR = 0x11 + BPF_CGROUP_SYSCTL = 0x12 + BPF_CGROUP_UDP4_RECVMSG = 0x13 + BPF_CGROUP_UDP6_RECVMSG = 0x14 + BPF_CGROUP_GETSOCKOPT = 0x15 + BPF_CGROUP_SETSOCKOPT = 0x16 + BPF_TRACE_RAW_TP = 0x17 + BPF_TRACE_FENTRY = 0x18 + BPF_TRACE_FEXIT = 0x19 + BPF_MODIFY_RETURN = 0x1a + BPF_LSM_MAC = 0x1b + BPF_TRACE_ITER = 0x1c + BPF_CGROUP_INET4_GETPEERNAME = 0x1d + BPF_CGROUP_INET6_GETPEERNAME = 0x1e + BPF_CGROUP_INET4_GETSOCKNAME = 0x1f + BPF_CGROUP_INET6_GETSOCKNAME = 0x20 + BPF_XDP_DEVMAP = 0x21 + BPF_CGROUP_INET_SOCK_RELEASE = 0x22 + BPF_XDP_CPUMAP = 0x23 + BPF_SK_LOOKUP = 0x24 + BPF_XDP = 0x25 + BPF_LINK_TYPE_UNSPEC = 0x0 + BPF_LINK_TYPE_RAW_TRACEPOINT = 0x1 + BPF_LINK_TYPE_TRACING = 0x2 + BPF_LINK_TYPE_CGROUP = 0x3 + BPF_LINK_TYPE_ITER = 0x4 + BPF_LINK_TYPE_NETNS = 0x5 + BPF_LINK_TYPE_XDP = 0x6 + BPF_ANY = 0x0 + BPF_NOEXIST = 0x1 + BPF_EXIST = 0x2 + BPF_F_LOCK = 0x4 + BPF_F_NO_PREALLOC = 0x1 + BPF_F_NO_COMMON_LRU = 0x2 + BPF_F_NUMA_NODE = 0x4 + BPF_F_RDONLY = 0x8 + BPF_F_WRONLY = 0x10 + BPF_F_STACK_BUILD_ID = 0x20 + BPF_F_ZERO_SEED = 0x40 + BPF_F_RDONLY_PROG = 0x80 + BPF_F_WRONLY_PROG = 0x100 + BPF_F_CLONE = 0x200 + BPF_F_MMAPABLE = 0x400 + BPF_F_PRESERVE_ELEMS = 0x800 + BPF_F_INNER_MAP = 0x1000 + BPF_STATS_RUN_TIME = 0x0 + BPF_STACK_BUILD_ID_EMPTY = 0x0 + BPF_STACK_BUILD_ID_VALID = 0x1 + BPF_STACK_BUILD_ID_IP = 0x2 + BPF_F_RECOMPUTE_CSUM = 0x1 + BPF_F_INVALIDATE_HASH = 0x2 + BPF_F_HDR_FIELD_MASK = 0xf + BPF_F_PSEUDO_HDR = 0x10 + BPF_F_MARK_MANGLED_0 = 0x20 + BPF_F_MARK_ENFORCE = 0x40 + BPF_F_INGRESS = 0x1 + BPF_F_TUNINFO_IPV6 = 0x1 + BPF_F_SKIP_FIELD_MASK = 0xff + BPF_F_USER_STACK = 0x100 + BPF_F_FAST_STACK_CMP = 0x200 + BPF_F_REUSE_STACKID = 0x400 + BPF_F_USER_BUILD_ID = 0x800 + BPF_F_ZERO_CSUM_TX = 0x2 + BPF_F_DONT_FRAGMENT = 0x4 + BPF_F_SEQ_NUMBER = 0x8 + BPF_F_INDEX_MASK = 0xffffffff + BPF_F_CURRENT_CPU = 0xffffffff + BPF_F_CTXLEN_MASK = 0xfffff00000000 + BPF_F_CURRENT_NETNS = -0x1 + BPF_CSUM_LEVEL_QUERY = 0x0 + BPF_CSUM_LEVEL_INC = 0x1 + BPF_CSUM_LEVEL_DEC = 0x2 + BPF_CSUM_LEVEL_RESET = 0x3 + BPF_F_ADJ_ROOM_FIXED_GSO = 0x1 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV4 = 0x2 + BPF_F_ADJ_ROOM_ENCAP_L3_IPV6 = 0x4 + BPF_F_ADJ_ROOM_ENCAP_L4_GRE = 0x8 + BPF_F_ADJ_ROOM_ENCAP_L4_UDP = 0x10 + BPF_F_ADJ_ROOM_NO_CSUM_RESET = 0x20 + BPF_ADJ_ROOM_ENCAP_L2_MASK = 0xff + BPF_ADJ_ROOM_ENCAP_L2_SHIFT = 0x38 + BPF_F_SYSCTL_BASE_NAME = 0x1 + BPF_LOCAL_STORAGE_GET_F_CREATE = 0x1 + BPF_SK_STORAGE_GET_F_CREATE = 0x1 + BPF_F_GET_BRANCH_RECORDS_SIZE = 0x1 + BPF_RB_NO_WAKEUP = 0x1 + BPF_RB_FORCE_WAKEUP = 0x2 + BPF_RB_AVAIL_DATA = 0x0 + BPF_RB_RING_SIZE = 0x1 + BPF_RB_CONS_POS = 0x2 + 
BPF_RB_PROD_POS = 0x3 + BPF_RINGBUF_BUSY_BIT = 0x80000000 + BPF_RINGBUF_DISCARD_BIT = 0x40000000 + BPF_RINGBUF_HDR_SZ = 0x8 + BPF_SK_LOOKUP_F_REPLACE = 0x1 + BPF_SK_LOOKUP_F_NO_REUSEPORT = 0x2 + BPF_ADJ_ROOM_NET = 0x0 + BPF_ADJ_ROOM_MAC = 0x1 + BPF_HDR_START_MAC = 0x0 + BPF_HDR_START_NET = 0x1 + BPF_LWT_ENCAP_SEG6 = 0x0 + BPF_LWT_ENCAP_SEG6_INLINE = 0x1 + BPF_LWT_ENCAP_IP = 0x2 + BPF_OK = 0x0 + BPF_DROP = 0x2 + BPF_REDIRECT = 0x7 + BPF_LWT_REROUTE = 0x80 + BPF_SOCK_OPS_RTO_CB_FLAG = 0x1 + BPF_SOCK_OPS_RETRANS_CB_FLAG = 0x2 + BPF_SOCK_OPS_STATE_CB_FLAG = 0x4 + BPF_SOCK_OPS_RTT_CB_FLAG = 0x8 + BPF_SOCK_OPS_PARSE_ALL_HDR_OPT_CB_FLAG = 0x10 + BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG = 0x20 + BPF_SOCK_OPS_WRITE_HDR_OPT_CB_FLAG = 0x40 + BPF_SOCK_OPS_ALL_CB_FLAGS = 0x7f + BPF_SOCK_OPS_VOID = 0x0 + BPF_SOCK_OPS_TIMEOUT_INIT = 0x1 + BPF_SOCK_OPS_RWND_INIT = 0x2 + BPF_SOCK_OPS_TCP_CONNECT_CB = 0x3 + BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB = 0x4 + BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB = 0x5 + BPF_SOCK_OPS_NEEDS_ECN = 0x6 + BPF_SOCK_OPS_BASE_RTT = 0x7 + BPF_SOCK_OPS_RTO_CB = 0x8 + BPF_SOCK_OPS_RETRANS_CB = 0x9 + BPF_SOCK_OPS_STATE_CB = 0xa + BPF_SOCK_OPS_TCP_LISTEN_CB = 0xb + BPF_SOCK_OPS_RTT_CB = 0xc + BPF_SOCK_OPS_PARSE_HDR_OPT_CB = 0xd + BPF_SOCK_OPS_HDR_OPT_LEN_CB = 0xe + BPF_SOCK_OPS_WRITE_HDR_OPT_CB = 0xf + BPF_TCP_ESTABLISHED = 0x1 + BPF_TCP_SYN_SENT = 0x2 + BPF_TCP_SYN_RECV = 0x3 + BPF_TCP_FIN_WAIT1 = 0x4 + BPF_TCP_FIN_WAIT2 = 0x5 + BPF_TCP_TIME_WAIT = 0x6 + BPF_TCP_CLOSE = 0x7 + BPF_TCP_CLOSE_WAIT = 0x8 + BPF_TCP_LAST_ACK = 0x9 + BPF_TCP_LISTEN = 0xa + BPF_TCP_CLOSING = 0xb + BPF_TCP_NEW_SYN_RECV = 0xc + BPF_TCP_MAX_STATES = 0xd + TCP_BPF_IW = 0x3e9 + TCP_BPF_SNDCWND_CLAMP = 0x3ea + TCP_BPF_DELACK_MAX = 0x3eb + TCP_BPF_RTO_MIN = 0x3ec + TCP_BPF_SYN = 0x3ed + TCP_BPF_SYN_IP = 0x3ee + TCP_BPF_SYN_MAC = 0x3ef + BPF_LOAD_HDR_OPT_TCP_SYN = 0x1 + BPF_WRITE_HDR_TCP_CURRENT_MSS = 0x1 + BPF_WRITE_HDR_TCP_SYNACK_COOKIE = 0x2 + BPF_DEVCG_ACC_MKNOD = 0x1 + BPF_DEVCG_ACC_READ = 0x2 + BPF_DEVCG_ACC_WRITE = 0x4 + BPF_DEVCG_DEV_BLOCK = 0x1 + BPF_DEVCG_DEV_CHAR = 0x2 + BPF_FIB_LOOKUP_DIRECT = 0x1 + BPF_FIB_LOOKUP_OUTPUT = 0x2 + BPF_FIB_LKUP_RET_SUCCESS = 0x0 + BPF_FIB_LKUP_RET_BLACKHOLE = 0x1 + BPF_FIB_LKUP_RET_UNREACHABLE = 0x2 + BPF_FIB_LKUP_RET_PROHIBIT = 0x3 + BPF_FIB_LKUP_RET_NOT_FWDED = 0x4 + BPF_FIB_LKUP_RET_FWD_DISABLED = 0x5 + BPF_FIB_LKUP_RET_UNSUPP_LWT = 0x6 + BPF_FIB_LKUP_RET_NO_NEIGH = 0x7 + BPF_FIB_LKUP_RET_FRAG_NEEDED = 0x8 + BPF_FD_TYPE_RAW_TRACEPOINT = 0x0 + BPF_FD_TYPE_TRACEPOINT = 0x1 + BPF_FD_TYPE_KPROBE = 0x2 + BPF_FD_TYPE_KRETPROBE = 0x3 + BPF_FD_TYPE_UPROBE = 0x4 + BPF_FD_TYPE_URETPROBE = 0x5 + BPF_FLOW_DISSECTOR_F_PARSE_1ST_FRAG = 0x1 + BPF_FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL = 0x2 + BPF_FLOW_DISSECTOR_F_STOP_AT_ENCAP = 0x4 ) const ( @@ -2681,6 +2718,7 @@ const ( RTNLGRP_IPV4_MROUTE_R = 0x1e RTNLGRP_IPV6_MROUTE_R = 0x1f RTNLGRP_NEXTHOP = 0x20 + RTNLGRP_BRVLAN = 0x21 ) type CapUserHeader struct { @@ -2775,132 +2813,317 @@ const ( ) const ( - DEVLINK_CMD_UNSPEC = 0x0 - DEVLINK_CMD_GET = 0x1 - DEVLINK_CMD_SET = 0x2 - DEVLINK_CMD_NEW = 0x3 - DEVLINK_CMD_DEL = 0x4 - DEVLINK_CMD_PORT_GET = 0x5 - DEVLINK_CMD_PORT_SET = 0x6 - DEVLINK_CMD_PORT_NEW = 0x7 - DEVLINK_CMD_PORT_DEL = 0x8 - DEVLINK_CMD_PORT_SPLIT = 0x9 - DEVLINK_CMD_PORT_UNSPLIT = 0xa - DEVLINK_CMD_SB_GET = 0xb - DEVLINK_CMD_SB_SET = 0xc - DEVLINK_CMD_SB_NEW = 0xd - DEVLINK_CMD_SB_DEL = 0xe - DEVLINK_CMD_SB_POOL_GET = 0xf - DEVLINK_CMD_SB_POOL_SET = 0x10 - DEVLINK_CMD_SB_POOL_NEW = 0x11 - DEVLINK_CMD_SB_POOL_DEL = 0x12 - 
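
Most of the churn in the BPF constant block just above is cosmetic: the new, longer identifiers (BPF_SOCK_OPS_PARSE_UNKNOWN_HDR_OPT_CB_FLAG and friends) force gofmt to re-align the whole const group, so every line is rewritten even though only the additions are new — the BPF_LINK_DETACH and BPF_PROG_BIND_MAP commands, BPF_MAP_TYPE_INODE_STORAGE, BPF_PROG_TYPE_SK_LOOKUP with its BPF_SK_LOOKUP attach value, the TCP header-option hooks, and the widened BPF_SOCK_OPS_ALL_CB_FLAGS (0xf to 0x7f). These are raw command and enum numbers for bpf(2); x/sys ships no wrapper, so a caller supplies its own attr struct. A hedged sketch using the long-standing BPF_PROG_GET_NEXT_ID command (the two-field attr is an assumption about the leading prefix of the kernel's bpf_attr union; Linux only, needs CAP_BPF or CAP_SYS_ADMIN):

	package main

	import (
		"fmt"
		"unsafe"

		"golang.org/x/sys/unix"
	)

	// progNextID wraps bpf(BPF_PROG_GET_NEXT_ID): the kernel writes the ID of
	// the next loaded program after `start` back into the attr struct.
	func progNextID(start uint32) (uint32, error) {
		attr := struct {
			startID uint32 // bpf_attr.start_id (assumed layout)
			nextID  uint32 // bpf_attr.next_id
		}{startID: start}
		_, _, errno := unix.Syscall(unix.SYS_BPF,
			unix.BPF_PROG_GET_NEXT_ID,
			uintptr(unsafe.Pointer(&attr)),
			unsafe.Sizeof(attr))
		if errno != 0 {
			return 0, errno // unix.ENOENT means end of list
		}
		return attr.nextID, nil
	}

	func main() {
		for id, err := progNextID(0); err == nil; id, err = progNextID(id) {
			fmt.Println("loaded BPF program id:", id)
		}
	}
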
DEVLINK_CMD_SB_PORT_POOL_GET = 0x13 - DEVLINK_CMD_SB_PORT_POOL_SET = 0x14 - DEVLINK_CMD_SB_PORT_POOL_NEW = 0x15 - DEVLINK_CMD_SB_PORT_POOL_DEL = 0x16 - DEVLINK_CMD_SB_TC_POOL_BIND_GET = 0x17 - DEVLINK_CMD_SB_TC_POOL_BIND_SET = 0x18 - DEVLINK_CMD_SB_TC_POOL_BIND_NEW = 0x19 - DEVLINK_CMD_SB_TC_POOL_BIND_DEL = 0x1a - DEVLINK_CMD_SB_OCC_SNAPSHOT = 0x1b - DEVLINK_CMD_SB_OCC_MAX_CLEAR = 0x1c - DEVLINK_CMD_ESWITCH_GET = 0x1d - DEVLINK_CMD_ESWITCH_SET = 0x1e - DEVLINK_CMD_DPIPE_TABLE_GET = 0x1f - DEVLINK_CMD_DPIPE_ENTRIES_GET = 0x20 - DEVLINK_CMD_DPIPE_HEADERS_GET = 0x21 - DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET = 0x22 - DEVLINK_CMD_MAX = 0x48 - DEVLINK_PORT_TYPE_NOTSET = 0x0 - DEVLINK_PORT_TYPE_AUTO = 0x1 - DEVLINK_PORT_TYPE_ETH = 0x2 - DEVLINK_PORT_TYPE_IB = 0x3 - DEVLINK_SB_POOL_TYPE_INGRESS = 0x0 - DEVLINK_SB_POOL_TYPE_EGRESS = 0x1 - DEVLINK_SB_THRESHOLD_TYPE_STATIC = 0x0 - DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC = 0x1 - DEVLINK_ESWITCH_MODE_LEGACY = 0x0 - DEVLINK_ESWITCH_MODE_SWITCHDEV = 0x1 - DEVLINK_ESWITCH_INLINE_MODE_NONE = 0x0 - DEVLINK_ESWITCH_INLINE_MODE_LINK = 0x1 - DEVLINK_ESWITCH_INLINE_MODE_NETWORK = 0x2 - DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT = 0x3 - DEVLINK_ESWITCH_ENCAP_MODE_NONE = 0x0 - DEVLINK_ESWITCH_ENCAP_MODE_BASIC = 0x1 - DEVLINK_ATTR_UNSPEC = 0x0 - DEVLINK_ATTR_BUS_NAME = 0x1 - DEVLINK_ATTR_DEV_NAME = 0x2 - DEVLINK_ATTR_PORT_INDEX = 0x3 - DEVLINK_ATTR_PORT_TYPE = 0x4 - DEVLINK_ATTR_PORT_DESIRED_TYPE = 0x5 - DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 0x6 - DEVLINK_ATTR_PORT_NETDEV_NAME = 0x7 - DEVLINK_ATTR_PORT_IBDEV_NAME = 0x8 - DEVLINK_ATTR_PORT_SPLIT_COUNT = 0x9 - DEVLINK_ATTR_PORT_SPLIT_GROUP = 0xa - DEVLINK_ATTR_SB_INDEX = 0xb - DEVLINK_ATTR_SB_SIZE = 0xc - DEVLINK_ATTR_SB_INGRESS_POOL_COUNT = 0xd - DEVLINK_ATTR_SB_EGRESS_POOL_COUNT = 0xe - DEVLINK_ATTR_SB_INGRESS_TC_COUNT = 0xf - DEVLINK_ATTR_SB_EGRESS_TC_COUNT = 0x10 - DEVLINK_ATTR_SB_POOL_INDEX = 0x11 - DEVLINK_ATTR_SB_POOL_TYPE = 0x12 - DEVLINK_ATTR_SB_POOL_SIZE = 0x13 - DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE = 0x14 - DEVLINK_ATTR_SB_THRESHOLD = 0x15 - DEVLINK_ATTR_SB_TC_INDEX = 0x16 - DEVLINK_ATTR_SB_OCC_CUR = 0x17 - DEVLINK_ATTR_SB_OCC_MAX = 0x18 - DEVLINK_ATTR_ESWITCH_MODE = 0x19 - DEVLINK_ATTR_ESWITCH_INLINE_MODE = 0x1a - DEVLINK_ATTR_DPIPE_TABLES = 0x1b - DEVLINK_ATTR_DPIPE_TABLE = 0x1c - DEVLINK_ATTR_DPIPE_TABLE_NAME = 0x1d - DEVLINK_ATTR_DPIPE_TABLE_SIZE = 0x1e - DEVLINK_ATTR_DPIPE_TABLE_MATCHES = 0x1f - DEVLINK_ATTR_DPIPE_TABLE_ACTIONS = 0x20 - DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED = 0x21 - DEVLINK_ATTR_DPIPE_ENTRIES = 0x22 - DEVLINK_ATTR_DPIPE_ENTRY = 0x23 - DEVLINK_ATTR_DPIPE_ENTRY_INDEX = 0x24 - DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES = 0x25 - DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES = 0x26 - DEVLINK_ATTR_DPIPE_ENTRY_COUNTER = 0x27 - DEVLINK_ATTR_DPIPE_MATCH = 0x28 - DEVLINK_ATTR_DPIPE_MATCH_VALUE = 0x29 - DEVLINK_ATTR_DPIPE_MATCH_TYPE = 0x2a - DEVLINK_ATTR_DPIPE_ACTION = 0x2b - DEVLINK_ATTR_DPIPE_ACTION_VALUE = 0x2c - DEVLINK_ATTR_DPIPE_ACTION_TYPE = 0x2d - DEVLINK_ATTR_DPIPE_VALUE = 0x2e - DEVLINK_ATTR_DPIPE_VALUE_MASK = 0x2f - DEVLINK_ATTR_DPIPE_VALUE_MAPPING = 0x30 - DEVLINK_ATTR_DPIPE_HEADERS = 0x31 - DEVLINK_ATTR_DPIPE_HEADER = 0x32 - DEVLINK_ATTR_DPIPE_HEADER_NAME = 0x33 - DEVLINK_ATTR_DPIPE_HEADER_ID = 0x34 - DEVLINK_ATTR_DPIPE_HEADER_FIELDS = 0x35 - DEVLINK_ATTR_DPIPE_HEADER_GLOBAL = 0x36 - DEVLINK_ATTR_DPIPE_HEADER_INDEX = 0x37 - DEVLINK_ATTR_DPIPE_FIELD = 0x38 - DEVLINK_ATTR_DPIPE_FIELD_NAME = 0x39 - DEVLINK_ATTR_DPIPE_FIELD_ID = 0x3a - DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH = 0x3b - 
DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 0x3c - DEVLINK_ATTR_PAD = 0x3d - DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 0x3e - DEVLINK_ATTR_MAX = 0x94 - DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 - DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 - DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 - DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY = 0x0 - DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC = 0x0 - DEVLINK_DPIPE_FIELD_IPV4_DST_IP = 0x0 - DEVLINK_DPIPE_FIELD_IPV6_DST_IP = 0x0 - DEVLINK_DPIPE_HEADER_ETHERNET = 0x0 - DEVLINK_DPIPE_HEADER_IPV4 = 0x1 - DEVLINK_DPIPE_HEADER_IPV6 = 0x2 + DEVLINK_CMD_UNSPEC = 0x0 + DEVLINK_CMD_GET = 0x1 + DEVLINK_CMD_SET = 0x2 + DEVLINK_CMD_NEW = 0x3 + DEVLINK_CMD_DEL = 0x4 + DEVLINK_CMD_PORT_GET = 0x5 + DEVLINK_CMD_PORT_SET = 0x6 + DEVLINK_CMD_PORT_NEW = 0x7 + DEVLINK_CMD_PORT_DEL = 0x8 + DEVLINK_CMD_PORT_SPLIT = 0x9 + DEVLINK_CMD_PORT_UNSPLIT = 0xa + DEVLINK_CMD_SB_GET = 0xb + DEVLINK_CMD_SB_SET = 0xc + DEVLINK_CMD_SB_NEW = 0xd + DEVLINK_CMD_SB_DEL = 0xe + DEVLINK_CMD_SB_POOL_GET = 0xf + DEVLINK_CMD_SB_POOL_SET = 0x10 + DEVLINK_CMD_SB_POOL_NEW = 0x11 + DEVLINK_CMD_SB_POOL_DEL = 0x12 + DEVLINK_CMD_SB_PORT_POOL_GET = 0x13 + DEVLINK_CMD_SB_PORT_POOL_SET = 0x14 + DEVLINK_CMD_SB_PORT_POOL_NEW = 0x15 + DEVLINK_CMD_SB_PORT_POOL_DEL = 0x16 + DEVLINK_CMD_SB_TC_POOL_BIND_GET = 0x17 + DEVLINK_CMD_SB_TC_POOL_BIND_SET = 0x18 + DEVLINK_CMD_SB_TC_POOL_BIND_NEW = 0x19 + DEVLINK_CMD_SB_TC_POOL_BIND_DEL = 0x1a + DEVLINK_CMD_SB_OCC_SNAPSHOT = 0x1b + DEVLINK_CMD_SB_OCC_MAX_CLEAR = 0x1c + DEVLINK_CMD_ESWITCH_GET = 0x1d + DEVLINK_CMD_ESWITCH_SET = 0x1e + DEVLINK_CMD_DPIPE_TABLE_GET = 0x1f + DEVLINK_CMD_DPIPE_ENTRIES_GET = 0x20 + DEVLINK_CMD_DPIPE_HEADERS_GET = 0x21 + DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET = 0x22 + DEVLINK_CMD_RESOURCE_SET = 0x23 + DEVLINK_CMD_RESOURCE_DUMP = 0x24 + DEVLINK_CMD_RELOAD = 0x25 + DEVLINK_CMD_PARAM_GET = 0x26 + DEVLINK_CMD_PARAM_SET = 0x27 + DEVLINK_CMD_PARAM_NEW = 0x28 + DEVLINK_CMD_PARAM_DEL = 0x29 + DEVLINK_CMD_REGION_GET = 0x2a + DEVLINK_CMD_REGION_SET = 0x2b + DEVLINK_CMD_REGION_NEW = 0x2c + DEVLINK_CMD_REGION_DEL = 0x2d + DEVLINK_CMD_REGION_READ = 0x2e + DEVLINK_CMD_PORT_PARAM_GET = 0x2f + DEVLINK_CMD_PORT_PARAM_SET = 0x30 + DEVLINK_CMD_PORT_PARAM_NEW = 0x31 + DEVLINK_CMD_PORT_PARAM_DEL = 0x32 + DEVLINK_CMD_INFO_GET = 0x33 + DEVLINK_CMD_HEALTH_REPORTER_GET = 0x34 + DEVLINK_CMD_HEALTH_REPORTER_SET = 0x35 + DEVLINK_CMD_HEALTH_REPORTER_RECOVER = 0x36 + DEVLINK_CMD_HEALTH_REPORTER_DIAGNOSE = 0x37 + DEVLINK_CMD_HEALTH_REPORTER_DUMP_GET = 0x38 + DEVLINK_CMD_HEALTH_REPORTER_DUMP_CLEAR = 0x39 + DEVLINK_CMD_FLASH_UPDATE = 0x3a + DEVLINK_CMD_FLASH_UPDATE_END = 0x3b + DEVLINK_CMD_FLASH_UPDATE_STATUS = 0x3c + DEVLINK_CMD_TRAP_GET = 0x3d + DEVLINK_CMD_TRAP_SET = 0x3e + DEVLINK_CMD_TRAP_NEW = 0x3f + DEVLINK_CMD_TRAP_DEL = 0x40 + DEVLINK_CMD_TRAP_GROUP_GET = 0x41 + DEVLINK_CMD_TRAP_GROUP_SET = 0x42 + DEVLINK_CMD_TRAP_GROUP_NEW = 0x43 + DEVLINK_CMD_TRAP_GROUP_DEL = 0x44 + DEVLINK_CMD_TRAP_POLICER_GET = 0x45 + DEVLINK_CMD_TRAP_POLICER_SET = 0x46 + DEVLINK_CMD_TRAP_POLICER_NEW = 0x47 + DEVLINK_CMD_TRAP_POLICER_DEL = 0x48 + DEVLINK_CMD_HEALTH_REPORTER_TEST = 0x49 + DEVLINK_CMD_MAX = 0x49 + DEVLINK_PORT_TYPE_NOTSET = 0x0 + DEVLINK_PORT_TYPE_AUTO = 0x1 + DEVLINK_PORT_TYPE_ETH = 0x2 + DEVLINK_PORT_TYPE_IB = 0x3 + DEVLINK_SB_POOL_TYPE_INGRESS = 0x0 + DEVLINK_SB_POOL_TYPE_EGRESS = 0x1 + DEVLINK_SB_THRESHOLD_TYPE_STATIC = 0x0 + DEVLINK_SB_THRESHOLD_TYPE_DYNAMIC = 0x1 + DEVLINK_ESWITCH_MODE_LEGACY = 0x0 + DEVLINK_ESWITCH_MODE_SWITCHDEV = 0x1 + DEVLINK_ESWITCH_INLINE_MODE_NONE = 0x0 + 
DEVLINK_ESWITCH_INLINE_MODE_LINK = 0x1 + DEVLINK_ESWITCH_INLINE_MODE_NETWORK = 0x2 + DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT = 0x3 + DEVLINK_ESWITCH_ENCAP_MODE_NONE = 0x0 + DEVLINK_ESWITCH_ENCAP_MODE_BASIC = 0x1 + DEVLINK_PORT_FLAVOUR_PHYSICAL = 0x0 + DEVLINK_PORT_FLAVOUR_CPU = 0x1 + DEVLINK_PORT_FLAVOUR_DSA = 0x2 + DEVLINK_PORT_FLAVOUR_PCI_PF = 0x3 + DEVLINK_PORT_FLAVOUR_PCI_VF = 0x4 + DEVLINK_PORT_FLAVOUR_VIRTUAL = 0x5 + DEVLINK_PORT_FLAVOUR_UNUSED = 0x6 + DEVLINK_PARAM_CMODE_RUNTIME = 0x0 + DEVLINK_PARAM_CMODE_DRIVERINIT = 0x1 + DEVLINK_PARAM_CMODE_PERMANENT = 0x2 + DEVLINK_PARAM_CMODE_MAX = 0x2 + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER = 0x0 + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH = 0x1 + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DISK = 0x2 + DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_UNKNOWN = 0x3 + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_UNKNOWN = 0x0 + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_ALWAYS = 0x1 + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_NEVER = 0x2 + DEVLINK_PARAM_RESET_DEV_ON_DRV_PROBE_VALUE_DISK = 0x3 + DEVLINK_ATTR_STATS_RX_PACKETS = 0x0 + DEVLINK_ATTR_STATS_RX_BYTES = 0x1 + DEVLINK_ATTR_STATS_RX_DROPPED = 0x2 + DEVLINK_ATTR_STATS_MAX = 0x2 + DEVLINK_FLASH_OVERWRITE_SETTINGS_BIT = 0x0 + DEVLINK_FLASH_OVERWRITE_IDENTIFIERS_BIT = 0x1 + DEVLINK_FLASH_OVERWRITE_MAX_BIT = 0x1 + DEVLINK_TRAP_ACTION_DROP = 0x0 + DEVLINK_TRAP_ACTION_TRAP = 0x1 + DEVLINK_TRAP_ACTION_MIRROR = 0x2 + DEVLINK_TRAP_TYPE_DROP = 0x0 + DEVLINK_TRAP_TYPE_EXCEPTION = 0x1 + DEVLINK_TRAP_TYPE_CONTROL = 0x2 + DEVLINK_ATTR_TRAP_METADATA_TYPE_IN_PORT = 0x0 + DEVLINK_ATTR_TRAP_METADATA_TYPE_FA_COOKIE = 0x1 + DEVLINK_RELOAD_ACTION_UNSPEC = 0x0 + DEVLINK_RELOAD_ACTION_DRIVER_REINIT = 0x1 + DEVLINK_RELOAD_ACTION_FW_ACTIVATE = 0x2 + DEVLINK_RELOAD_ACTION_MAX = 0x2 + DEVLINK_RELOAD_LIMIT_UNSPEC = 0x0 + DEVLINK_RELOAD_LIMIT_NO_RESET = 0x1 + DEVLINK_RELOAD_LIMIT_MAX = 0x1 + DEVLINK_ATTR_UNSPEC = 0x0 + DEVLINK_ATTR_BUS_NAME = 0x1 + DEVLINK_ATTR_DEV_NAME = 0x2 + DEVLINK_ATTR_PORT_INDEX = 0x3 + DEVLINK_ATTR_PORT_TYPE = 0x4 + DEVLINK_ATTR_PORT_DESIRED_TYPE = 0x5 + DEVLINK_ATTR_PORT_NETDEV_IFINDEX = 0x6 + DEVLINK_ATTR_PORT_NETDEV_NAME = 0x7 + DEVLINK_ATTR_PORT_IBDEV_NAME = 0x8 + DEVLINK_ATTR_PORT_SPLIT_COUNT = 0x9 + DEVLINK_ATTR_PORT_SPLIT_GROUP = 0xa + DEVLINK_ATTR_SB_INDEX = 0xb + DEVLINK_ATTR_SB_SIZE = 0xc + DEVLINK_ATTR_SB_INGRESS_POOL_COUNT = 0xd + DEVLINK_ATTR_SB_EGRESS_POOL_COUNT = 0xe + DEVLINK_ATTR_SB_INGRESS_TC_COUNT = 0xf + DEVLINK_ATTR_SB_EGRESS_TC_COUNT = 0x10 + DEVLINK_ATTR_SB_POOL_INDEX = 0x11 + DEVLINK_ATTR_SB_POOL_TYPE = 0x12 + DEVLINK_ATTR_SB_POOL_SIZE = 0x13 + DEVLINK_ATTR_SB_POOL_THRESHOLD_TYPE = 0x14 + DEVLINK_ATTR_SB_THRESHOLD = 0x15 + DEVLINK_ATTR_SB_TC_INDEX = 0x16 + DEVLINK_ATTR_SB_OCC_CUR = 0x17 + DEVLINK_ATTR_SB_OCC_MAX = 0x18 + DEVLINK_ATTR_ESWITCH_MODE = 0x19 + DEVLINK_ATTR_ESWITCH_INLINE_MODE = 0x1a + DEVLINK_ATTR_DPIPE_TABLES = 0x1b + DEVLINK_ATTR_DPIPE_TABLE = 0x1c + DEVLINK_ATTR_DPIPE_TABLE_NAME = 0x1d + DEVLINK_ATTR_DPIPE_TABLE_SIZE = 0x1e + DEVLINK_ATTR_DPIPE_TABLE_MATCHES = 0x1f + DEVLINK_ATTR_DPIPE_TABLE_ACTIONS = 0x20 + DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED = 0x21 + DEVLINK_ATTR_DPIPE_ENTRIES = 0x22 + DEVLINK_ATTR_DPIPE_ENTRY = 0x23 + DEVLINK_ATTR_DPIPE_ENTRY_INDEX = 0x24 + DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES = 0x25 + DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES = 0x26 + DEVLINK_ATTR_DPIPE_ENTRY_COUNTER = 0x27 + DEVLINK_ATTR_DPIPE_MATCH = 0x28 + DEVLINK_ATTR_DPIPE_MATCH_VALUE = 0x29 + DEVLINK_ATTR_DPIPE_MATCH_TYPE = 0x2a + DEVLINK_ATTR_DPIPE_ACTION = 0x2b + DEVLINK_ATTR_DPIPE_ACTION_VALUE 
= 0x2c + DEVLINK_ATTR_DPIPE_ACTION_TYPE = 0x2d + DEVLINK_ATTR_DPIPE_VALUE = 0x2e + DEVLINK_ATTR_DPIPE_VALUE_MASK = 0x2f + DEVLINK_ATTR_DPIPE_VALUE_MAPPING = 0x30 + DEVLINK_ATTR_DPIPE_HEADERS = 0x31 + DEVLINK_ATTR_DPIPE_HEADER = 0x32 + DEVLINK_ATTR_DPIPE_HEADER_NAME = 0x33 + DEVLINK_ATTR_DPIPE_HEADER_ID = 0x34 + DEVLINK_ATTR_DPIPE_HEADER_FIELDS = 0x35 + DEVLINK_ATTR_DPIPE_HEADER_GLOBAL = 0x36 + DEVLINK_ATTR_DPIPE_HEADER_INDEX = 0x37 + DEVLINK_ATTR_DPIPE_FIELD = 0x38 + DEVLINK_ATTR_DPIPE_FIELD_NAME = 0x39 + DEVLINK_ATTR_DPIPE_FIELD_ID = 0x3a + DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH = 0x3b + DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE = 0x3c + DEVLINK_ATTR_PAD = 0x3d + DEVLINK_ATTR_ESWITCH_ENCAP_MODE = 0x3e + DEVLINK_ATTR_RESOURCE_LIST = 0x3f + DEVLINK_ATTR_RESOURCE = 0x40 + DEVLINK_ATTR_RESOURCE_NAME = 0x41 + DEVLINK_ATTR_RESOURCE_ID = 0x42 + DEVLINK_ATTR_RESOURCE_SIZE = 0x43 + DEVLINK_ATTR_RESOURCE_SIZE_NEW = 0x44 + DEVLINK_ATTR_RESOURCE_SIZE_VALID = 0x45 + DEVLINK_ATTR_RESOURCE_SIZE_MIN = 0x46 + DEVLINK_ATTR_RESOURCE_SIZE_MAX = 0x47 + DEVLINK_ATTR_RESOURCE_SIZE_GRAN = 0x48 + DEVLINK_ATTR_RESOURCE_UNIT = 0x49 + DEVLINK_ATTR_RESOURCE_OCC = 0x4a + DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_ID = 0x4b + DEVLINK_ATTR_DPIPE_TABLE_RESOURCE_UNITS = 0x4c + DEVLINK_ATTR_PORT_FLAVOUR = 0x4d + DEVLINK_ATTR_PORT_NUMBER = 0x4e + DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER = 0x4f + DEVLINK_ATTR_PARAM = 0x50 + DEVLINK_ATTR_PARAM_NAME = 0x51 + DEVLINK_ATTR_PARAM_GENERIC = 0x52 + DEVLINK_ATTR_PARAM_TYPE = 0x53 + DEVLINK_ATTR_PARAM_VALUES_LIST = 0x54 + DEVLINK_ATTR_PARAM_VALUE = 0x55 + DEVLINK_ATTR_PARAM_VALUE_DATA = 0x56 + DEVLINK_ATTR_PARAM_VALUE_CMODE = 0x57 + DEVLINK_ATTR_REGION_NAME = 0x58 + DEVLINK_ATTR_REGION_SIZE = 0x59 + DEVLINK_ATTR_REGION_SNAPSHOTS = 0x5a + DEVLINK_ATTR_REGION_SNAPSHOT = 0x5b + DEVLINK_ATTR_REGION_SNAPSHOT_ID = 0x5c + DEVLINK_ATTR_REGION_CHUNKS = 0x5d + DEVLINK_ATTR_REGION_CHUNK = 0x5e + DEVLINK_ATTR_REGION_CHUNK_DATA = 0x5f + DEVLINK_ATTR_REGION_CHUNK_ADDR = 0x60 + DEVLINK_ATTR_REGION_CHUNK_LEN = 0x61 + DEVLINK_ATTR_INFO_DRIVER_NAME = 0x62 + DEVLINK_ATTR_INFO_SERIAL_NUMBER = 0x63 + DEVLINK_ATTR_INFO_VERSION_FIXED = 0x64 + DEVLINK_ATTR_INFO_VERSION_RUNNING = 0x65 + DEVLINK_ATTR_INFO_VERSION_STORED = 0x66 + DEVLINK_ATTR_INFO_VERSION_NAME = 0x67 + DEVLINK_ATTR_INFO_VERSION_VALUE = 0x68 + DEVLINK_ATTR_SB_POOL_CELL_SIZE = 0x69 + DEVLINK_ATTR_FMSG = 0x6a + DEVLINK_ATTR_FMSG_OBJ_NEST_START = 0x6b + DEVLINK_ATTR_FMSG_PAIR_NEST_START = 0x6c + DEVLINK_ATTR_FMSG_ARR_NEST_START = 0x6d + DEVLINK_ATTR_FMSG_NEST_END = 0x6e + DEVLINK_ATTR_FMSG_OBJ_NAME = 0x6f + DEVLINK_ATTR_FMSG_OBJ_VALUE_TYPE = 0x70 + DEVLINK_ATTR_FMSG_OBJ_VALUE_DATA = 0x71 + DEVLINK_ATTR_HEALTH_REPORTER = 0x72 + DEVLINK_ATTR_HEALTH_REPORTER_NAME = 0x73 + DEVLINK_ATTR_HEALTH_REPORTER_STATE = 0x74 + DEVLINK_ATTR_HEALTH_REPORTER_ERR_COUNT = 0x75 + DEVLINK_ATTR_HEALTH_REPORTER_RECOVER_COUNT = 0x76 + DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS = 0x77 + DEVLINK_ATTR_HEALTH_REPORTER_GRACEFUL_PERIOD = 0x78 + DEVLINK_ATTR_HEALTH_REPORTER_AUTO_RECOVER = 0x79 + DEVLINK_ATTR_FLASH_UPDATE_FILE_NAME = 0x7a + DEVLINK_ATTR_FLASH_UPDATE_COMPONENT = 0x7b + DEVLINK_ATTR_FLASH_UPDATE_STATUS_MSG = 0x7c + DEVLINK_ATTR_FLASH_UPDATE_STATUS_DONE = 0x7d + DEVLINK_ATTR_FLASH_UPDATE_STATUS_TOTAL = 0x7e + DEVLINK_ATTR_PORT_PCI_PF_NUMBER = 0x7f + DEVLINK_ATTR_PORT_PCI_VF_NUMBER = 0x80 + DEVLINK_ATTR_STATS = 0x81 + DEVLINK_ATTR_TRAP_NAME = 0x82 + DEVLINK_ATTR_TRAP_ACTION = 0x83 + DEVLINK_ATTR_TRAP_TYPE = 0x84 + DEVLINK_ATTR_TRAP_GENERIC = 0x85 + DEVLINK_ATTR_TRAP_METADATA = 0x86 + 
DEVLINK_ATTR_TRAP_GROUP_NAME = 0x87 + DEVLINK_ATTR_RELOAD_FAILED = 0x88 + DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS = 0x89 + DEVLINK_ATTR_NETNS_FD = 0x8a + DEVLINK_ATTR_NETNS_PID = 0x8b + DEVLINK_ATTR_NETNS_ID = 0x8c + DEVLINK_ATTR_HEALTH_REPORTER_AUTO_DUMP = 0x8d + DEVLINK_ATTR_TRAP_POLICER_ID = 0x8e + DEVLINK_ATTR_TRAP_POLICER_RATE = 0x8f + DEVLINK_ATTR_TRAP_POLICER_BURST = 0x90 + DEVLINK_ATTR_PORT_FUNCTION = 0x91 + DEVLINK_ATTR_INFO_BOARD_SERIAL_NUMBER = 0x92 + DEVLINK_ATTR_PORT_LANES = 0x93 + DEVLINK_ATTR_PORT_SPLITTABLE = 0x94 + DEVLINK_ATTR_PORT_EXTERNAL = 0x95 + DEVLINK_ATTR_PORT_CONTROLLER_NUMBER = 0x96 + DEVLINK_ATTR_FLASH_UPDATE_STATUS_TIMEOUT = 0x97 + DEVLINK_ATTR_FLASH_UPDATE_OVERWRITE_MASK = 0x98 + DEVLINK_ATTR_RELOAD_ACTION = 0x99 + DEVLINK_ATTR_RELOAD_ACTIONS_PERFORMED = 0x9a + DEVLINK_ATTR_RELOAD_LIMITS = 0x9b + DEVLINK_ATTR_DEV_STATS = 0x9c + DEVLINK_ATTR_RELOAD_STATS = 0x9d + DEVLINK_ATTR_RELOAD_STATS_ENTRY = 0x9e + DEVLINK_ATTR_RELOAD_STATS_LIMIT = 0x9f + DEVLINK_ATTR_RELOAD_STATS_VALUE = 0xa0 + DEVLINK_ATTR_REMOTE_RELOAD_STATS = 0xa1 + DEVLINK_ATTR_RELOAD_ACTION_INFO = 0xa2 + DEVLINK_ATTR_RELOAD_ACTION_STATS = 0xa3 + DEVLINK_ATTR_MAX = 0xa3 + DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE = 0x0 + DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX = 0x1 + DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT = 0x0 + DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY = 0x0 + DEVLINK_DPIPE_FIELD_ETHERNET_DST_MAC = 0x0 + DEVLINK_DPIPE_FIELD_IPV4_DST_IP = 0x0 + DEVLINK_DPIPE_FIELD_IPV6_DST_IP = 0x0 + DEVLINK_DPIPE_HEADER_ETHERNET = 0x0 + DEVLINK_DPIPE_HEADER_IPV4 = 0x1 + DEVLINK_DPIPE_HEADER_IPV6 = 0x2 + DEVLINK_RESOURCE_UNIT_ENTRY = 0x0 + DEVLINK_PORT_FUNCTION_ATTR_UNSPEC = 0x0 + DEVLINK_PORT_FUNCTION_ATTR_HW_ADDR = 0x1 + DEVLINK_PORT_FUNCTION_ATTR_MAX = 0x1 ) type FsverityDigest struct { diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index d54618aa61f..088bd77e3be 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build 386,linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 741d25be957..078d958ec95 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build amd64,linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index e8d982c3df7..2d39122f410 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
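The regenerated ztypes_linux.go constants above are the substantive part of this x/sys bump — new BPF ringbuf/sock-ops values, RTNLGRP_BRVLAN, and a much larger DEVLINK command/attribute table — while the per-arch ztypes files below only record the generator's new /build/linux/types.go path. A minimal linux-only sketch (assuming a module that vendors this golang.org/x/sys revision) prints the extended upper bounds:

    package main

    import (
    	"fmt"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	// This regeneration raises DEVLINK_CMD_MAX from 0x48 to 0x49 and
    	// DEVLINK_ATTR_MAX from 0x94 to 0xa3, and adds RTNLGRP_BRVLAN (0x21).
    	fmt.Printf("DEVLINK_CMD_MAX  = %#x\n", unix.DEVLINK_CMD_MAX)
    	fmt.Printf("DEVLINK_ATTR_MAX = %#x\n", unix.DEVLINK_ATTR_MAX)
    	fmt.Printf("RTNLGRP_BRVLAN   = %#x\n", unix.RTNLGRP_BRVLAN)
    }

These constants are only defined for linux build targets, so the sketch does not compile elsewhere.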
// +build arm,linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index 311cf2155d5..304cbd04536 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build arm64,linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 1312bdf77fe..7d9d57006aa 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build mips,linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 2a993481950..a1eb2577b08 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build mips64,linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index f964307b293..2e5ce3b6a69 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build mips64le,linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index ca0fab27020..bbaa1200b6a 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build mipsle,linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index 257e0042473..0e6e8a77483 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build ppc64,linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 980dd31736a..7382f385faf 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build ppc64le,linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go index d9fdab20b83..28d55221665 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_riscv64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build riscv64,linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index c25de8c679c..a91a7a44bd3 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. // +build s390x,linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go index 97fca65340e..f824b2358dc 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_sparc64.go @@ -1,4 +1,4 @@ -// cgo -godefs -- -Wall -Werror -static -I/tmp/include linux/types.go | go run mkpost.go +// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/linux/types.go | go run mkpost.go // Code generated by the command above; see README.md. DO NOT EDIT. 
// +build sparc64,linux diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go index a89100c08ae..3f11f88e3c6 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go @@ -248,6 +248,7 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 SizeofLinger = 0x8 + SizeofIovec = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x1c diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go index 289184e0b3a..0bed83af57b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go @@ -255,6 +255,7 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 SizeofLinger = 0x8 + SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x30 diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go index 428c450e4ce..e4e3bf736d8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go @@ -253,6 +253,7 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 SizeofLinger = 0x8 + SizeofIovec = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x1c diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go index 6f1f2842cc3..efac861bb7f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm64.go @@ -255,6 +255,7 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x14 SizeofLinger = 0x8 + SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x30 diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go index 61ea0019a29..80fa295f1df 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go @@ -231,6 +231,7 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x20 SizeofLinger = 0x8 + SizeofIovec = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x1c diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go index 87a493f68fd..560dd6d08af 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go @@ -235,6 +235,7 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x20 SizeofLinger = 0x8 + SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x30 diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go index d80836efaba..0c1700fa435 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm.go @@ -235,6 +235,7 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x20 SizeofLinger = 0x8 + SizeofIovec = 0x8 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x1c diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go index 4e158746f11..5b3e46633e9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_arm64.go @@ -231,6 +231,7 @@ const ( SizeofSockaddrUnix = 0x6a 
SizeofSockaddrDatalink = 0x20 SizeofLinger = 0x8 + SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x30 diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go index 992a1f8c018..62bff167097 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_openbsd_mips64.go @@ -231,6 +231,7 @@ const ( SizeofSockaddrUnix = 0x6a SizeofSockaddrDatalink = 0x20 SizeofLinger = 0x8 + SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x30 diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go index db817f3ba82..ca512aff7f8 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go @@ -234,6 +234,7 @@ const ( SizeofSockaddrUnix = 0x6e SizeofSockaddrDatalink = 0xfc SizeofLinger = 0x8 + SizeofIovec = 0x10 SizeofIPMreq = 0x8 SizeofIPv6Mreq = 0x14 SizeofMsghdr = 0x30 diff --git a/vendor/golang.org/x/sys/windows/dll_windows.go b/vendor/golang.org/x/sys/windows/dll_windows.go index 9cd147b7e3f..115341fba66 100644 --- a/vendor/golang.org/x/sys/windows/dll_windows.go +++ b/vendor/golang.org/x/sys/windows/dll_windows.go @@ -391,7 +391,6 @@ func loadLibraryEx(name string, system bool) (*DLL, error) { var flags uintptr if system { if canDoSearchSystem32() { - const LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x00000800 flags = LOAD_LIBRARY_SEARCH_SYSTEM32 } else if isBaseName(name) { // WindowsXP or unpatched Windows machine diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go index 14906485f3a..69eb462c59a 100644 --- a/vendor/golang.org/x/sys/windows/security_windows.go +++ b/vendor/golang.org/x/sys/windows/security_windows.go @@ -624,6 +624,7 @@ func (tml *Tokenmandatorylabel) Size() uint32 { // Authorization Functions //sys checkTokenMembership(tokenHandle Token, sidToCheck *SID, isMember *int32) (err error) = advapi32.CheckTokenMembership +//sys isTokenRestricted(tokenHandle Token) (ret bool, err error) [!failretval] = advapi32.IsTokenRestricted //sys OpenProcessToken(process Handle, access uint32, token *Token) (err error) = advapi32.OpenProcessToken //sys OpenThreadToken(thread Handle, access uint32, openAsSelf bool, token *Token) (err error) = advapi32.OpenThreadToken //sys ImpersonateSelf(impersonationlevel uint32) (err error) = advapi32.ImpersonateSelf @@ -837,6 +838,16 @@ func (t Token) IsMember(sid *SID) (bool, error) { return b != 0, nil } +// IsRestricted reports whether the access token t is a restricted token. +func (t Token) IsRestricted() (isRestricted bool, err error) { + isRestricted, err = isTokenRestricted(t) + if !isRestricted && err == syscall.EINVAL { + // If err is EINVAL, this returned ERROR_SUCCESS indicating a non-restricted token. 
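+ // (errnoErr maps a zero GetLastError value, i.e. ERROR_SUCCESS, to syscall.EINVAL,
+ // so this branch is the call-succeeded-but-token-not-restricted case, not a real failure.)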
+ err = nil + } + return +} + const ( WTS_CONSOLE_CONNECT = 0x1 WTS_CONSOLE_DISCONNECT = 0x2 diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index f54ff90aacd..b269850d066 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -128,6 +128,10 @@ const ( SERVICE_NOTIFY_CREATED = 0x00000080 SERVICE_NOTIFY_DELETED = 0x00000100 SERVICE_NOTIFY_DELETE_PENDING = 0x00000200 + + SC_EVENT_DATABASE_CHANGE = 0 + SC_EVENT_PROPERTY_CHANGE = 1 + SC_EVENT_STATUS_CHANGE = 2 ) type SERVICE_STATUS struct { @@ -229,3 +233,5 @@ type QUERY_SERVICE_LOCK_STATUS struct { //sys EnumServicesStatusEx(mgr Handle, infoLevel uint32, serviceType uint32, serviceState uint32, services *byte, bufSize uint32, bytesNeeded *uint32, servicesReturned *uint32, resumeHandle *uint32, groupName *uint16) (err error) = advapi32.EnumServicesStatusExW //sys QueryServiceStatusEx(service Handle, infoLevel uint32, buff *byte, buffSize uint32, bytesNeeded *uint32) (err error) = advapi32.QueryServiceStatusEx //sys NotifyServiceStatusChange(service Handle, notifyMask uint32, notifier *SERVICE_NOTIFY) (ret error) = advapi32.NotifyServiceStatusChangeW +//sys SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) = sechost.SubscribeServiceChangeNotifications? +//sys UnsubscribeServiceChangeNotifications(subscription uintptr) = sechost.UnsubscribeServiceChangeNotifications? diff --git a/vendor/golang.org/x/sys/windows/syscall_windows.go b/vendor/golang.org/x/sys/windows/syscall_windows.go index 86a46f7713a..0f17fb75eb9 100644 --- a/vendor/golang.org/x/sys/windows/syscall_windows.go +++ b/vendor/golang.org/x/sys/windows/syscall_windows.go @@ -170,6 +170,8 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys GetProcAddress(module Handle, procname string) (proc uintptr, err error) //sys GetModuleFileName(module Handle, filename *uint16, size uint32) (n uint32, err error) = kernel32.GetModuleFileNameW //sys GetModuleHandleEx(flags uint32, moduleName *uint16, module *Handle) (err error) = kernel32.GetModuleHandleExW +//sys SetDefaultDllDirectories(directoryFlags uint32) (err error) +//sys SetDllDirectory(path string) (err error) = kernel32.SetDllDirectoryW //sys GetVersion() (ver uint32, err error) //sys FormatMessage(flags uint32, msgsrc uintptr, msgid uint32, langid uint32, buf []uint16, args *byte) (n uint32, err error) = FormatMessageW //sys ExitProcess(exitcode uint32) @@ -258,7 +260,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys TransmitFile(s Handle, handle Handle, bytesToWrite uint32, bytsPerSend uint32, overlapped *Overlapped, transmitFileBuf *TransmitFileBuffers, flags uint32) (err error) = mswsock.TransmitFile //sys ReadDirectoryChanges(handle Handle, buf *byte, buflen uint32, watchSubTree bool, mask uint32, retlen *uint32, overlapped *Overlapped, completionRoutine uintptr) (err error) = kernel32.ReadDirectoryChangesW //sys CertOpenSystemStore(hprov Handle, name *uint16) (store Handle, err error) = crypt32.CertOpenSystemStoreW -//sys CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) [failretval==InvalidHandle] = crypt32.CertOpenStore +//sys CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) = crypt32.CertOpenStore //sys CertEnumCertificatesInStore(store 
Handle, prevContext *CertContext) (context *CertContext, err error) [failretval==nil] = crypt32.CertEnumCertificatesInStore //sys CertAddCertificateContextToStore(store Handle, certContext *CertContext, addDisposition uint32, storeContext **CertContext) (err error) = crypt32.CertAddCertificateContextToStore //sys CertCloseStore(store Handle, flags uint32) (err error) = crypt32.CertCloseStore @@ -273,6 +275,7 @@ func NewCallbackCDecl(fn interface{}) uintptr { //sys RegQueryInfoKey(key Handle, class *uint16, classLen *uint32, reserved *uint32, subkeysLen *uint32, maxSubkeyLen *uint32, maxClassLen *uint32, valuesLen *uint32, maxValueNameLen *uint32, maxValueLen *uint32, saLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegQueryInfoKeyW //sys RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reserved *uint32, class *uint16, classLen *uint32, lastWriteTime *Filetime) (regerrno error) = advapi32.RegEnumKeyExW //sys RegQueryValueEx(key Handle, name *uint16, reserved *uint32, valtype *uint32, buf *byte, buflen *uint32) (regerrno error) = advapi32.RegQueryValueExW +//sys RegNotifyChangeKeyValue(key Handle, watchSubtree bool, notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) = advapi32.RegNotifyChangeKeyValue //sys GetCurrentProcessId() (pid uint32) = kernel32.GetCurrentProcessId //sys ProcessIdToSessionId(pid uint32, sessionid *uint32) (err error) = kernel32.ProcessIdToSessionId //sys GetConsoleMode(console Handle, mode *uint32) (err error) = kernel32.GetConsoleMode diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index e7ae37f8848..bbede404128 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -1801,3 +1801,40 @@ const ( FileCaseSensitiveInfo = 23 FileNormalizedNameInfo = 24 ) + +// LoadLibrary flags for determining from where to search for a DLL +const ( + DONT_RESOLVE_DLL_REFERENCES = 0x1 + LOAD_LIBRARY_AS_DATAFILE = 0x2 + LOAD_WITH_ALTERED_SEARCH_PATH = 0x8 + LOAD_IGNORE_CODE_AUTHZ_LEVEL = 0x10 + LOAD_LIBRARY_AS_IMAGE_RESOURCE = 0x20 + LOAD_LIBRARY_AS_DATAFILE_EXCLUSIVE = 0x40 + LOAD_LIBRARY_REQUIRE_SIGNED_TARGET = 0x80 + LOAD_LIBRARY_SEARCH_DLL_LOAD_DIR = 0x100 + LOAD_LIBRARY_SEARCH_APPLICATION_DIR = 0x200 + LOAD_LIBRARY_SEARCH_USER_DIRS = 0x400 + LOAD_LIBRARY_SEARCH_SYSTEM32 = 0x800 + LOAD_LIBRARY_SEARCH_DEFAULT_DIRS = 0x1000 + LOAD_LIBRARY_SAFE_CURRENT_DIRS = 0x00002000 + LOAD_LIBRARY_SEARCH_SYSTEM32_NO_FORWARDER = 0x00004000 + LOAD_LIBRARY_OS_INTEGRITY_CONTINUITY = 0x00008000 +) + +// RegNotifyChangeKeyValue notifyFilter flags. +const ( + // REG_NOTIFY_CHANGE_NAME notifies the caller if a subkey is added or deleted. + REG_NOTIFY_CHANGE_NAME = 0x00000001 + + // REG_NOTIFY_CHANGE_ATTRIBUTES notifies the caller of changes to the attributes of the key, such as the security descriptor information. + REG_NOTIFY_CHANGE_ATTRIBUTES = 0x00000002 + + // REG_NOTIFY_CHANGE_LAST_SET notifies the caller of changes to a value of the key. This can include adding or deleting a value, or changing an existing value. + REG_NOTIFY_CHANGE_LAST_SET = 0x00000004 + + // REG_NOTIFY_CHANGE_SECURITY notifies the caller of changes to the security descriptor of the key. + REG_NOTIFY_CHANGE_SECURITY = 0x00000008 + + // REG_NOTIFY_THREAD_AGNOSTIC indicates that the lifetime of the registration must not be tied to the lifetime of the thread issuing the RegNotifyChangeKeyValue call. 
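That is, by default the registration is tied to the registering thread and the event is also signaled when that thread exits.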
Note: This flag value is only supported in Windows 8 and later. + REG_NOTIFY_THREAD_AGNOSTIC = 0x10000000 +) diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go index 8fbef7da669..72a91a5f16e 100644 --- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go +++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go @@ -46,6 +46,7 @@ var ( modntdll = NewLazySystemDLL("ntdll.dll") modole32 = NewLazySystemDLL("ole32.dll") modpsapi = NewLazySystemDLL("psapi.dll") + modsechost = NewLazySystemDLL("sechost.dll") modsecur32 = NewLazySystemDLL("secur32.dll") modshell32 = NewLazySystemDLL("shell32.dll") moduser32 = NewLazySystemDLL("user32.dll") @@ -95,6 +96,7 @@ var ( procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") procInitializeSecurityDescriptor = modadvapi32.NewProc("InitializeSecurityDescriptor") procInitiateSystemShutdownExW = modadvapi32.NewProc("InitiateSystemShutdownExW") + procIsTokenRestricted = modadvapi32.NewProc("IsTokenRestricted") procIsValidSecurityDescriptor = modadvapi32.NewProc("IsValidSecurityDescriptor") procIsValidSid = modadvapi32.NewProc("IsValidSid") procIsWellKnownSid = modadvapi32.NewProc("IsWellKnownSid") @@ -115,6 +117,7 @@ var ( procQueryServiceStatusEx = modadvapi32.NewProc("QueryServiceStatusEx") procRegCloseKey = modadvapi32.NewProc("RegCloseKey") procRegEnumKeyExW = modadvapi32.NewProc("RegEnumKeyExW") + procRegNotifyChangeKeyValue = modadvapi32.NewProc("RegNotifyChangeKeyValue") procRegOpenKeyExW = modadvapi32.NewProc("RegOpenKeyExW") procRegQueryInfoKeyW = modadvapi32.NewProc("RegQueryInfoKeyW") procRegQueryValueExW = modadvapi32.NewProc("RegQueryValueExW") @@ -279,6 +282,8 @@ var ( procSetConsoleCursorPosition = modkernel32.NewProc("SetConsoleCursorPosition") procSetConsoleMode = modkernel32.NewProc("SetConsoleMode") procSetCurrentDirectoryW = modkernel32.NewProc("SetCurrentDirectoryW") + procSetDefaultDllDirectories = modkernel32.NewProc("SetDefaultDllDirectories") + procSetDllDirectoryW = modkernel32.NewProc("SetDllDirectoryW") procSetEndOfFile = modkernel32.NewProc("SetEndOfFile") procSetEnvironmentVariableW = modkernel32.NewProc("SetEnvironmentVariableW") procSetErrorMode = modkernel32.NewProc("SetErrorMode") @@ -326,6 +331,8 @@ var ( procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") procStringFromGUID2 = modole32.NewProc("StringFromGUID2") procEnumProcesses = modpsapi.NewProc("EnumProcesses") + procSubscribeServiceChangeNotifications = modsechost.NewProc("SubscribeServiceChangeNotifications") + procUnsubscribeServiceChangeNotifications = modsechost.NewProc("UnsubscribeServiceChangeNotifications") procGetUserNameExW = modsecur32.NewProc("GetUserNameExW") procTranslateNameW = modsecur32.NewProc("TranslateNameW") procCommandLineToArgvW = modshell32.NewProc("CommandLineToArgvW") @@ -756,6 +763,15 @@ func InitiateSystemShutdownEx(machineName *uint16, message *uint16, timeout uint return } +func isTokenRestricted(tokenHandle Token) (ret bool, err error) { + r0, _, e1 := syscall.Syscall(procIsTokenRestricted.Addr(), 1, uintptr(tokenHandle), 0, 0) + ret = r0 != 0 + if !ret { + err = errnoErr(e1) + } + return +} + func isValidSecurityDescriptor(sd *SECURITY_DESCRIPTOR) (isValid bool) { r0, _, _ := syscall.Syscall(procIsValidSecurityDescriptor.Addr(), 1, uintptr(unsafe.Pointer(sd)), 0, 0) isValid = r0 != 0 @@ -916,6 +932,22 @@ func RegEnumKeyEx(key Handle, index uint32, name *uint16, nameLen *uint32, reser return } +func RegNotifyChangeKeyValue(key Handle, watchSubtree bool, 
notifyFilter uint32, event Handle, asynchronous bool) (regerrno error) { + var _p0 uint32 + if watchSubtree { + _p0 = 1 + } + var _p1 uint32 + if asynchronous { + _p1 = 1 + } + r0, _, _ := syscall.Syscall6(procRegNotifyChangeKeyValue.Addr(), 5, uintptr(key), uintptr(_p0), uintptr(notifyFilter), uintptr(event), uintptr(_p1), 0) + if r0 != 0 { + regerrno = syscall.Errno(r0) + } + return +} + func RegOpenKeyEx(key Handle, subkey *uint16, options uint32, desiredAccess uint32, result *Handle) (regerrno error) { r0, _, _ := syscall.Syscall6(procRegOpenKeyExW.Addr(), 5, uintptr(key), uintptr(unsafe.Pointer(subkey)), uintptr(options), uintptr(desiredAccess), uintptr(unsafe.Pointer(result)), 0) if r0 != 0 { @@ -1181,7 +1213,7 @@ func CertGetCertificateChain(engine Handle, leaf *CertContext, time *Filetime, a func CertOpenStore(storeProvider uintptr, msgAndCertEncodingType uint32, cryptProv uintptr, flags uint32, para uintptr) (handle Handle, err error) { r0, _, e1 := syscall.Syscall6(procCertOpenStore.Addr(), 5, uintptr(storeProvider), uintptr(msgAndCertEncodingType), uintptr(cryptProv), uintptr(flags), uintptr(para), 0) handle = Handle(r0) - if handle == InvalidHandle { + if handle == 0 { err = errnoErr(e1) } return @@ -2366,6 +2398,31 @@ func SetCurrentDirectory(path *uint16) (err error) { return } +func SetDefaultDllDirectories(directoryFlags uint32) (err error) { + r1, _, e1 := syscall.Syscall(procSetDefaultDllDirectories.Addr(), 1, uintptr(directoryFlags), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + +func SetDllDirectory(path string) (err error) { + var _p0 *uint16 + _p0, err = syscall.UTF16PtrFromString(path) + if err != nil { + return + } + return _SetDllDirectory(_p0) +} + +func _SetDllDirectory(path *uint16) (err error) { + r1, _, e1 := syscall.Syscall(procSetDllDirectoryW.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) + if r1 == 0 { + err = errnoErr(e1) + } + return +} + func SetEndOfFile(handle Handle) (err error) { r1, _, e1 := syscall.Syscall(procSetEndOfFile.Addr(), 1, uintptr(handle), 0, 0) if r1 == 0 { @@ -2752,6 +2809,27 @@ func EnumProcesses(processIds []uint32, bytesReturned *uint32) (err error) { return } +func SubscribeServiceChangeNotifications(service Handle, eventType uint32, callback uintptr, callbackCtx uintptr, subscription *uintptr) (ret error) { + ret = procSubscribeServiceChangeNotifications.Find() + if ret != nil { + return + } + r0, _, _ := syscall.Syscall6(procSubscribeServiceChangeNotifications.Addr(), 5, uintptr(service), uintptr(eventType), uintptr(callback), uintptr(callbackCtx), uintptr(unsafe.Pointer(subscription)), 0) + if r0 != 0 { + ret = syscall.Errno(r0) + } + return +} + +func UnsubscribeServiceChangeNotifications(subscription uintptr) (err error) { + err = procUnsubscribeServiceChangeNotifications.Find() + if err != nil { + return + } + syscall.Syscall(procUnsubscribeServiceChangeNotifications.Addr(), 1, uintptr(subscription), 0, 0) + return +} + func GetUserNameEx(nameFormat uint32, nameBuffre *uint16, nSize *uint32) (err error) { r1, _, e1 := syscall.Syscall(procGetUserNameExW.Addr(), 3, uintptr(nameFormat), uintptr(unsafe.Pointer(nameBuffre)), uintptr(unsafe.Pointer(nSize))) if r1&0xff == 0 { diff --git a/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go b/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go index f0b15051c52..02555648a0b 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/structtag/structtag.go @@ -207,12 
+207,12 @@ var ( ) // validateStructTag parses the struct tag and returns an error if it is not -// in the canonical format, which is a space-separated list of key:"value" -// settings. The value may contain spaces. +// in the canonical format, as defined by reflect.StructTag. func validateStructTag(tag string) error { // This code is based on the StructTag.Get code in package reflect. n := 0 + var keys []string for ; tag != ""; n++ { if n > 0 && tag != "" && tag[0] != ' ' { // More restrictive than reflect, but catches likely mistakes @@ -240,14 +240,27 @@ func validateStructTag(tag string) error { if i == 0 { return errTagKeySyntax } - if i+1 >= len(tag) || tag[i] != ':' { + if i+1 >= len(tag) || tag[i] < ' ' || tag[i] == 0x7f { return errTagSyntax } - if tag[i+1] != '"' { + key := tag[:i] + keys = append(keys, key) + tag = tag[i:] + + // If we found a space char here - assume that we have a tag with + // multiple keys. + if tag[0] == ' ' { + continue + } + + // Spaces were filtered above so we assume that here we have + // only valid tag value started with `:"`. + if tag[0] != ':' || tag[1] != '"' { return errTagValueSyntax } - key := tag[:i] - tag = tag[i+1:] + + // Remove the colon leaving tag at the start of the quoted string. + tag = tag[1:] // Scan quoted string to find value. i = 1 @@ -263,51 +276,56 @@ func validateStructTag(tag string) error { qvalue := tag[:i+1] tag = tag[i+1:] - value, err := strconv.Unquote(qvalue) + wholeValue, err := strconv.Unquote(qvalue) if err != nil { return errTagValueSyntax } - if !checkTagSpaces[key] { - continue - } - - switch key { - case "xml": - // If the first or last character in the XML tag is a space, it is - // suspicious. - if strings.Trim(value, " ") != value { - return errTagValueSpace + for _, key := range keys { + if !checkTagSpaces[key] { + continue } - // If there are multiple spaces, they are suspicious. - if strings.Count(value, " ") > 1 { - return errTagValueSpace - } + value := wholeValue + switch key { + case "xml": + // If the first or last character in the XML tag is a space, it is + // suspicious. + if strings.Trim(value, " ") != value { + return errTagValueSpace + } - // If there is no comma, skip the rest of the checks. - comma := strings.IndexRune(value, ',') - if comma < 0 { - continue + // If there are multiple spaces, they are suspicious. + if strings.Count(value, " ") > 1 { + return errTagValueSpace + } + + // If there is no comma, skip the rest of the checks. + comma := strings.IndexRune(value, ',') + if comma < 0 { + continue + } + + // If the character before a comma is a space, this is suspicious. + if comma > 0 && value[comma-1] == ' ' { + return errTagValueSpace + } + value = value[comma+1:] + case "json": + // JSON allows using spaces in the name, so skip it. + comma := strings.IndexRune(value, ',') + if comma < 0 { + continue + } + value = value[comma+1:] } - // If the character before a comma is a space, this is suspicious. - if comma > 0 && value[comma-1] == ' ' { + if strings.IndexByte(value, ' ') >= 0 { return errTagValueSpace } - value = value[comma+1:] - case "json": - // JSON allows using spaces in the name, so skip it. 
- comma := strings.IndexRune(value, ',') - if comma < 0 { - continue - } - value = value[comma+1:] } - if strings.IndexByte(value, ' ') >= 0 { - return errTagValueSpace - } + keys = keys[:0] } return nil } diff --git a/vendor/golang.org/x/tools/go/ssa/mode.go b/vendor/golang.org/x/tools/go/ssa/mode.go index d2a269893a7..298f24b91f5 100644 --- a/vendor/golang.org/x/tools/go/ssa/mode.go +++ b/vendor/golang.org/x/tools/go/ssa/mode.go @@ -66,6 +66,9 @@ func (m BuilderMode) String() string { if m&BuildSerially != 0 { buf.WriteByte('L') } + if m&BareInits != 0 { + buf.WriteByte('I') + } return buf.String() } @@ -88,6 +91,8 @@ func (m *BuilderMode) Set(s string) error { mode |= NaiveForm case 'L': mode |= BuildSerially + case 'I': + mode |= BareInits default: return fmt.Errorf("unknown BuilderMode option: %q", c) } diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go index e37b4949150..a6cf0e64a4b 100644 --- a/vendor/golang.org/x/tools/internal/event/core/event.go +++ b/vendor/golang.org/x/tools/internal/event/core/event.go @@ -12,7 +12,7 @@ import ( "golang.org/x/tools/internal/event/label" ) -// Event holds the information about an event of note that ocurred. +// Event holds the information about an event of note that occurred. type Event struct { at time.Time diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go index 73f7a495879..ce3269a4306 100644 --- a/vendor/golang.org/x/tools/internal/imports/mod.go +++ b/vendor/golang.org/x/tools/internal/imports/mod.go @@ -88,7 +88,11 @@ func (r *ModuleResolver) init() error { if gmc := r.env.Env["GOMODCACHE"]; gmc != "" { r.moduleCacheDir = gmc } else { - r.moduleCacheDir = filepath.Join(filepath.SplitList(goenv["GOPATH"])[0], "/pkg/mod") + gopaths := filepath.SplitList(goenv["GOPATH"]) + if len(gopaths) == 0 { + return fmt.Errorf("empty GOPATH") + } + r.moduleCacheDir = filepath.Join(gopaths[0], "/pkg/mod") } sort.Slice(r.modsByModPath, func(i, j int) bool { diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index a5bb408e2f1..c3e1a397dbf 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -2,9 +2,12 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// Package typesinternal provides access to internal go/types APIs that are not +// yet exported. package typesinternal import ( + "go/token" "go/types" "reflect" "unsafe" @@ -26,3 +29,17 @@ func SetUsesCgo(conf *types.Config) bool { return true } + +func ReadGo116ErrorData(terr types.Error) (ErrorCode, token.Pos, token.Pos, bool) { + var data [3]int + // By coincidence all of these fields are ints, which simplifies things. 
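+ // (reflect's typed getters such as Int can read these unexported go/types fields;
+ // only Interface would panic. If a future Go release renames them, FieldByName
+ // yields an invalid Value and we return ok=false below.)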
+ v := reflect.ValueOf(terr) + for i, name := range []string{"go116code", "go116start", "go116end"} { + f := v.FieldByName(name) + if !f.IsValid() { + return 0, 0, 0, false + } + data[i] = int(f.Int()) + } + return ErrorCode(data[0]), token.Pos(data[1]), token.Pos(data[2]), true +} diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json index fecc65193c7..6694fb2a736 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json +++ b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-api.json @@ -1171,7 +1171,7 @@ } } }, - "revision": "20201027", + "revision": "20201111", "rootUrl": "https://cloudresourcemanager.googleapis.com/", "schemas": { "Ancestor": { @@ -1237,9 +1237,6 @@ "description": "Associates `members` with a `role`.", "id": "Binding", "properties": { - "bindingId": { - "type": "string" - }, "condition": { "$ref": "Expr", "description": "The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies)." @@ -1403,6 +1400,83 @@ }, "type": "object" }, + "CreateFolderMetadata": { + "description": "Metadata pertaining to the Folder creation process.", + "id": "CreateFolderMetadata", + "properties": { + "displayName": { + "description": "The display name of the folder.", + "type": "string" + }, + "parent": { + "description": "The resource name of the folder or organization we are creating the folder under.", + "type": "string" + } + }, + "type": "object" + }, + "CreateProjectMetadata": { + "description": "A status object which is used as the `metadata` field for the Operation returned by CreateProject. It provides insight for when significant phases of Project creation have completed.", + "id": "CreateProjectMetadata", + "properties": { + "createTime": { + "description": "Creation time of the project creation workflow.", + "format": "google-datetime", + "type": "string" + }, + "gettable": { + "description": "True if the project can be retrieved using GetProject. 
No other operations on the project are guaranteed to work until the project creation is complete.", + "type": "boolean" + }, + "ready": { + "description": "True if the project creation process is complete.", + "type": "boolean" + } + }, + "type": "object" + }, + "CreateTagKeyMetadata": { + "description": "Runtime operation information for creating a TagKey.", + "id": "CreateTagKeyMetadata", + "properties": {}, + "type": "object" + }, + "CreateTagValueMetadata": { + "description": "Runtime operation information for creating a TagValue.", + "id": "CreateTagValueMetadata", + "properties": {}, + "type": "object" + }, + "DeleteFolderMetadata": { + "description": "A status object which is used as the `metadata` field for the Operation returned by DeleteFolder.", + "id": "DeleteFolderMetadata", + "properties": {}, + "type": "object" + }, + "DeleteOrganizationMetadata": { + "description": "A status object which is used as the `metadata` field for the Operation returned by DeleteOrganization.", + "id": "DeleteOrganizationMetadata", + "properties": {}, + "type": "object" + }, + "DeleteProjectMetadata": { + "description": "A status object which is used as the `metadata` field for the Operation returned by DeleteProject.", + "id": "DeleteProjectMetadata", + "properties": {}, + "type": "object" + }, + "DeleteTagKeyMetadata": { + "description": "Runtime operation information for deleting a TagKey.", + "id": "DeleteTagKeyMetadata", + "properties": {}, + "type": "object" + }, + "DeleteTagValueMetadata": { + "description": "Runtime operation information for deleting a TagValue.", + "id": "DeleteTagValueMetadata", + "properties": {}, + "type": "object" + }, "Empty": { "description": "A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. 
For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); } The JSON representation for `Empty` is empty JSON object `{}`.", "id": "Empty", @@ -1762,6 +1836,31 @@ }, "type": "object" }, + "MoveFolderMetadata": { + "description": "Metadata pertaining to the Folder move process.", + "id": "MoveFolderMetadata", + "properties": { + "destinationParent": { + "description": "The resource name of the folder or organization to move the folder to.", + "type": "string" + }, + "displayName": { + "description": "The display name of the folder.", + "type": "string" + }, + "sourceParent": { + "description": "The resource name of the folder's parent.", + "type": "string" + } + }, + "type": "object" + }, + "MoveProjectMetadata": { + "description": "A status object which is used as the `metadata` field for the Operation returned by MoveProject.", + "id": "MoveProjectMetadata", + "properties": {}, + "type": "object" + }, "Operation": { "description": "This resource represents a long-running operation that is the result of a network API call.", "id": "Operation", @@ -2128,11 +2227,65 @@ }, "type": "object" }, + "UndeleteFolderMetadata": { + "description": "A status object which is used as the `metadata` field for the Operation returned by UndeleteFolder.", + "id": "UndeleteFolderMetadata", + "properties": {}, + "type": "object" + }, + "UndeleteOrganizationMetadata": { + "description": "A status object which is used as the `metadata` field for the Operation returned by UndeleteOrganization.", + "id": "UndeleteOrganizationMetadata", + "properties": {}, + "type": "object" + }, + "UndeleteProjectMetadata": { + "description": "A status object which is used as the `metadata` field for the Operation returned by UndeleteProject.", + "id": "UndeleteProjectMetadata", + "properties": {}, + "type": "object" + }, "UndeleteProjectRequest": { "description": "The request sent to the UndeleteProject method.", "id": "UndeleteProjectRequest", "properties": {}, "type": "object" + }, + "UndeleteTagKeyMetadata": { + "description": "Runtime operation information for undeleting a TagKey.", + "id": "UndeleteTagKeyMetadata", + "properties": {}, + "type": "object" + }, + "UndeleteTagValueMetadata": { + "description": "Runtime operation information for deleting a TagValue.", + "id": "UndeleteTagValueMetadata", + "properties": {}, + "type": "object" + }, + "UpdateFolderMetadata": { + "description": "A status object which is used as the `metadata` field for the Operation returned by UpdateFolder.", + "id": "UpdateFolderMetadata", + "properties": {}, + "type": "object" + }, + "UpdateProjectMetadata": { + "description": "A status object which is used as the `metadata` field for the Operation returned by UpdateProject.", + "id": "UpdateProjectMetadata", + "properties": {}, + "type": "object" + }, + "UpdateTagKeyMetadata": { + "description": "Runtime operation information for updating a TagKey.", + "id": "UpdateTagKeyMetadata", + "properties": {}, + "type": "object" + }, + "UpdateTagValueMetadata": { + "description": "Runtime operation information for updating a TagValue.", + "id": "UpdateTagValueMetadata", + "properties": {}, + "type": "object" } }, "servicePath": "", diff --git a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go index 0ad5f59b45f..fc7c9a9e981 100644 --- a/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go +++ 
b/vendor/google.golang.org/api/cloudresourcemanager/v1/cloudresourcemanager-gen.go @@ -327,8 +327,6 @@ func (s *AuditLogConfig) MarshalJSON() ([]byte, error) { // Binding: Associates `members` with a `role`. type Binding struct { - BindingId string `json:"bindingId,omitempty"` - // Condition: The condition that is associated with this binding. If the // condition evaluates to `true`, then this binding applies to the // current request. If the condition evaluates to `false`, then this @@ -378,7 +376,7 @@ type Binding struct { // `roles/viewer`, `roles/editor`, or `roles/owner`. Role string `json:"role,omitempty"` - // ForceSendFields is a list of field names (e.g. "BindingId") to + // ForceSendFields is a list of field names (e.g. "Condition") to // unconditionally include in API requests. By default, fields with // empty values are omitted from API requests. However, any non-pointer, // non-interface field appearing in ForceSendFields will be sent to the @@ -386,7 +384,7 @@ type Binding struct { // used to include empty fields in Patch requests. ForceSendFields []string `json:"-"` - // NullFields is a list of field names (e.g. "BindingId") to include in + // NullFields is a list of field names (e.g. "Condition") to include in // API requests with the JSON null value. By default, fields with empty // values are omitted from API requests. However, any field with an // empty value appearing in NullFields will be sent to the server as @@ -660,6 +658,113 @@ func (s *Constraint) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// CreateFolderMetadata: Metadata pertaining to the Folder creation +// process. +type CreateFolderMetadata struct { + // DisplayName: The display name of the folder. + DisplayName string `json:"displayName,omitempty"` + + // Parent: The resource name of the folder or organization we are + // creating the folder under. + Parent string `json:"parent,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DisplayName") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DisplayName") to include + // in API requests with the JSON null value. By default, fields with + // empty values are omitted from API requests. However, any field with + // an empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CreateFolderMetadata) MarshalJSON() ([]byte, error) { + type NoMethod CreateFolderMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CreateProjectMetadata: A status object which is used as the +// `metadata` field for the Operation returned by CreateProject. It +// provides insight for when significant phases of Project creation have +// completed. +type CreateProjectMetadata struct { + // CreateTime: Creation time of the project creation workflow. + CreateTime string `json:"createTime,omitempty"` + + // Gettable: True if the project can be retrieved using GetProject. 
No + // other operations on the project are guaranteed to work until the + // project creation is complete. + Gettable bool `json:"gettable,omitempty"` + + // Ready: True if the project creation process is complete. + Ready bool `json:"ready,omitempty"` + + // ForceSendFields is a list of field names (e.g. "CreateTime") to + // unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "CreateTime") to include in + // API requests with the JSON null value. By default, fields with empty + // values are omitted from API requests. However, any field with an + // empty value appearing in NullFields will be sent to the server as + // null. It is an error if a field in this list has a non-empty value. + // This may be used to include null fields in Patch requests. + NullFields []string `json:"-"` +} + +func (s *CreateProjectMetadata) MarshalJSON() ([]byte, error) { + type NoMethod CreateProjectMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// CreateTagKeyMetadata: Runtime operation information for creating a +// TagKey. +type CreateTagKeyMetadata struct { +} + +// CreateTagValueMetadata: Runtime operation information for creating a +// TagValue. +type CreateTagValueMetadata struct { +} + +// DeleteFolderMetadata: A status object which is used as the `metadata` +// field for the Operation returned by DeleteFolder. +type DeleteFolderMetadata struct { +} + +// DeleteOrganizationMetadata: A status object which is used as the +// `metadata` field for the Operation returned by DeleteOrganization. +type DeleteOrganizationMetadata struct { +} + +// DeleteProjectMetadata: A status object which is used as the +// `metadata` field for the Operation returned by DeleteProject. +type DeleteProjectMetadata struct { +} + +// DeleteTagKeyMetadata: Runtime operation information for deleting a +// TagKey. +type DeleteTagKeyMetadata struct { +} + +// DeleteTagValueMetadata: Runtime operation information for deleting a +// TagValue. +type DeleteTagValueMetadata struct { +} + // Empty: A generic empty message that you can re-use to avoid defining // duplicated empty messages in your APIs. A typical example is to use // it as the request or the response type of an API method. For @@ -1457,6 +1562,47 @@ func (s *ListProjectsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// MoveFolderMetadata: Metadata pertaining to the Folder move process. +type MoveFolderMetadata struct { + // DestinationParent: The resource name of the folder or organization to + // move the folder to. + DestinationParent string `json:"destinationParent,omitempty"` + + // DisplayName: The display name of the folder. + DisplayName string `json:"displayName,omitempty"` + + // SourceParent: The resource name of the folder's parent. + SourceParent string `json:"sourceParent,omitempty"` + + // ForceSendFields is a list of field names (e.g. "DestinationParent") + // to unconditionally include in API requests. By default, fields with + // empty values are omitted from API requests. 
However, any non-pointer, + // non-interface field appearing in ForceSendFields will be sent to the + // server regardless of whether the field is empty or not. This may be + // used to include empty fields in Patch requests. + ForceSendFields []string `json:"-"` + + // NullFields is a list of field names (e.g. "DestinationParent") to + // include in API requests with the JSON null value. By default, fields + // with empty values are omitted from API requests. However, any field + // with an empty value appearing in NullFields will be sent to the + // server as null. It is an error if a field in this list has a + // non-empty value. This may be used to include null fields in Patch + // requests. + NullFields []string `json:"-"` +} + +func (s *MoveFolderMetadata) MarshalJSON() ([]byte, error) { + type NoMethod MoveFolderMetadata + raw := NoMethod(*s) + return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) +} + +// MoveProjectMetadata: A status object which is used as the `metadata` +// field for the Operation returned by MoveProject. +type MoveProjectMetadata struct { +} + // Operation: This resource represents a long-running operation that is // the result of a network API call. type Operation struct { @@ -2220,11 +2366,56 @@ func (s *TestIamPermissionsResponse) MarshalJSON() ([]byte, error) { return gensupport.MarshalJSON(raw, s.ForceSendFields, s.NullFields) } +// UndeleteFolderMetadata: A status object which is used as the +// `metadata` field for the Operation returned by UndeleteFolder. +type UndeleteFolderMetadata struct { +} + +// UndeleteOrganizationMetadata: A status object which is used as the +// `metadata` field for the Operation returned by UndeleteOrganization. +type UndeleteOrganizationMetadata struct { +} + +// UndeleteProjectMetadata: A status object which is used as the +// `metadata` field for the Operation returned by UndeleteProject. +type UndeleteProjectMetadata struct { +} + // UndeleteProjectRequest: The request sent to the UndeleteProject // method. type UndeleteProjectRequest struct { } +// UndeleteTagKeyMetadata: Runtime operation information for undeleting +// a TagKey. +type UndeleteTagKeyMetadata struct { +} + +// UndeleteTagValueMetadata: Runtime operation information for undeleting +// a TagValue. +type UndeleteTagValueMetadata struct { +} + +// UpdateFolderMetadata: A status object which is used as the `metadata` +// field for the Operation returned by UpdateFolder. +type UpdateFolderMetadata struct { +} + +// UpdateProjectMetadata: A status object which is used as the +// `metadata` field for the Operation returned by UpdateProject. +type UpdateProjectMetadata struct { +} + +// UpdateTagKeyMetadata: Runtime operation information for updating a +// TagKey. +type UpdateTagKeyMetadata struct { +} + +// UpdateTagValueMetadata: Runtime operation information for updating a +// TagValue. 
+type UpdateTagValueMetadata struct { +} + // method id "cloudresourcemanager.folders.clearOrgPolicy": type FoldersClearOrgPolicyCall struct { @@ -2271,7 +2462,7 @@ func (c *FoldersClearOrgPolicyCall) Header() http.Header { func (c *FoldersClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2415,7 +2606,7 @@ func (c *FoldersGetEffectiveOrgPolicyCall) Header() http.Header { func (c *FoldersGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2560,7 +2751,7 @@ func (c *FoldersGetOrgPolicyCall) Header() http.Header { func (c *FoldersGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2702,7 +2893,7 @@ func (c *FoldersListAvailableOrgPolicyConstraintsCall) Header() http.Header { func (c *FoldersListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2867,7 +3058,7 @@ func (c *FoldersListOrgPoliciesCall) Header() http.Header { func (c *FoldersListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3032,7 +3223,7 @@ func (c *FoldersSetOrgPolicyCall) Header() http.Header { func (c *FoldersSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3174,7 +3365,7 @@ func (c *LiensCreateCall) Header() http.Header { func (c *LiensCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3303,7 +3494,7 @@ func (c *LiensDeleteCall) Header() http.Header { func (c *LiensDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3449,7 +3640,7 @@ 
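// Editor's aside (not part of the patch): the new *Metadata types added
// above (CreateProjectMetadata, MoveFolderMetadata, UndeleteFolderMetadata,
// and friends) are payloads for the long-running Operation's `metadata`
// field, and decoding works the same way for all of them. A self-contained
// toy sketch, using a local mirror of CreateProjectMetadata and a sample
// payload rather than a live API call:

package main

import (
	"encoding/json"
	"fmt"
)

// createProjectMetadata mirrors the fields of the generated
// CreateProjectMetadata type above.
type createProjectMetadata struct {
	CreateTime string `json:"createTime,omitempty"`
	Gettable   bool   `json:"gettable,omitempty"`
	Ready      bool   `json:"ready,omitempty"`
}

func main() {
	// A sample Operation.metadata value as CreateProject might return it.
	raw := []byte(`{"createTime":"2021-02-08T12:00:00Z","gettable":true,"ready":false}`)
	var md createProjectMetadata
	if err := json.Unmarshal(raw, &md); err != nil {
		panic(err)
	}
	// Gettable means GetProject already works, even though project
	// creation (signalled by Ready) has not finished yet.
	fmt.Printf("gettable=%v ready=%v\n", md.Gettable, md.Ready)
}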
func (c *LiensGetCall) Header() http.Header { func (c *LiensGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3620,7 +3811,7 @@ func (c *LiensListCall) Header() http.Header { func (c *LiensListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3792,7 +3983,7 @@ func (c *OperationsGetCall) Header() http.Header { func (c *OperationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3928,7 +4119,7 @@ func (c *OrganizationsClearOrgPolicyCall) Header() http.Header { func (c *OrganizationsClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4078,7 +4269,7 @@ func (c *OrganizationsGetCall) Header() http.Header { func (c *OrganizationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4218,7 +4409,7 @@ func (c *OrganizationsGetEffectiveOrgPolicyCall) Header() http.Header { func (c *OrganizationsGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4364,7 +4555,7 @@ func (c *OrganizationsGetIamPolicyCall) Header() http.Header { func (c *OrganizationsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4509,7 +4700,7 @@ func (c *OrganizationsGetOrgPolicyCall) Header() http.Header { func (c *OrganizationsGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4651,7 +4842,7 @@ func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) Header() http.Heade func (c *OrganizationsListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { 
reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4816,7 +5007,7 @@ func (c *OrganizationsListOrgPoliciesCall) Header() http.Header { func (c *OrganizationsListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4980,7 +5171,7 @@ func (c *OrganizationsSearchCall) Header() http.Header { func (c *OrganizationsSearchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5134,7 +5325,7 @@ func (c *OrganizationsSetIamPolicyCall) Header() http.Header { func (c *OrganizationsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5277,7 +5468,7 @@ func (c *OrganizationsSetOrgPolicyCall) Header() http.Header { func (c *OrganizationsSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5420,7 +5611,7 @@ func (c *OrganizationsTestIamPermissionsCall) Header() http.Header { func (c *OrganizationsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5561,7 +5752,7 @@ func (c *ProjectsClearOrgPolicyCall) Header() http.Header { func (c *ProjectsClearOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5710,7 +5901,7 @@ func (c *ProjectsCreateCall) Header() http.Header { func (c *ProjectsCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5846,7 +6037,7 @@ func (c *ProjectsDeleteCall) Header() http.Header { func (c *ProjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5988,7 +6179,7 @@ func (c *ProjectsGetCall) Header() http.Header { func (c *ProjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6126,7 +6317,7 @@ func (c *ProjectsGetAncestryCall) Header() http.Header { func (c *ProjectsGetAncestryCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6270,7 +6461,7 @@ func (c *ProjectsGetEffectiveOrgPolicyCall) Header() http.Header { func (c *ProjectsGetEffectiveOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6417,7 +6608,7 @@ func (c *ProjectsGetIamPolicyCall) Header() http.Header { func (c *ProjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6561,7 +6752,7 @@ func (c *ProjectsGetOrgPolicyCall) Header() http.Header { func (c *ProjectsGetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6769,7 +6960,7 @@ func (c *ProjectsListCall) Header() http.Header { func (c *ProjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6931,7 +7122,7 @@ func (c *ProjectsListAvailableOrgPolicyConstraintsCall) Header() http.Header { func (c *ProjectsListAvailableOrgPolicyConstraintsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7096,7 +7287,7 @@ func (c *ProjectsListOrgPoliciesCall) Header() http.Header { func (c *ProjectsListOrgPoliciesCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7294,7 +7485,7 
@@ func (c *ProjectsSetIamPolicyCall) Header() http.Header { func (c *ProjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7436,7 +7627,7 @@ func (c *ProjectsSetOrgPolicyCall) Header() http.Header { func (c *ProjectsSetOrgPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7580,7 +7771,7 @@ func (c *ProjectsTestIamPermissionsCall) Header() http.Header { func (c *ProjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7724,7 +7915,7 @@ func (c *ProjectsUndeleteCall) Header() http.Header { func (c *ProjectsUndeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7865,7 +8056,7 @@ func (c *ProjectsUpdateCall) Header() http.Header { func (c *ProjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/internal/creds.go b/vendor/google.golang.org/api/internal/creds.go index dc6d50e96aa..c93daa98c32 100644 --- a/vendor/google.golang.org/api/internal/creds.go +++ b/vendor/google.golang.org/api/internal/creds.go @@ -34,24 +34,24 @@ func baseCreds(ctx context.Context, ds *DialSettings) (*google.Credentials, erro return ds.Credentials, nil } if ds.CredentialsJSON != nil { - return credentialsFromJSON(ctx, ds.CredentialsJSON, ds.Endpoint, ds.Scopes, ds.Audiences) + return credentialsFromJSON(ctx, ds.CredentialsJSON, ds) } if ds.CredentialsFile != "" { data, err := ioutil.ReadFile(ds.CredentialsFile) if err != nil { return nil, fmt.Errorf("cannot read credentials file: %v", err) } - return credentialsFromJSON(ctx, data, ds.Endpoint, ds.Scopes, ds.Audiences) + return credentialsFromJSON(ctx, data, ds) } if ds.TokenSource != nil { return &google.Credentials{TokenSource: ds.TokenSource}, nil } - cred, err := google.FindDefaultCredentials(ctx, ds.Scopes...) + cred, err := google.FindDefaultCredentials(ctx, ds.GetScopes()...) if err != nil { return nil, err } if len(cred.JSON) > 0 { - return credentialsFromJSON(ctx, cred.JSON, ds.Endpoint, ds.Scopes, ds.Audiences) + return credentialsFromJSON(ctx, cred.JSON, ds) } // For GAE and GCE, the JSON is empty so return the default credentials directly. 
return cred, nil @@ -66,12 +66,12 @@ const ( // // - If the JSON is a service account and no scopes provided, returns self-signed JWT auth flow // - Otherwise, returns OAuth 2.0 flow. -func credentialsFromJSON(ctx context.Context, data []byte, endpoint string, scopes []string, audiences []string) (*google.Credentials, error) { - cred, err := google.CredentialsFromJSON(ctx, data, scopes...) +func credentialsFromJSON(ctx context.Context, data []byte, ds *DialSettings) (*google.Credentials, error) { + cred, err := google.CredentialsFromJSON(ctx, data, ds.GetScopes()...) if err != nil { return nil, err } - if len(data) > 0 && len(scopes) == 0 { + if len(data) > 0 && len(ds.Scopes) == 0 && (ds.DefaultAudience != "" || len(ds.Audiences) > 0) { var f struct { Type string `json:"type"` // The rest JSON fields are omitted because they are not used. @@ -80,7 +80,7 @@ func credentialsFromJSON(ctx context.Context, data []byte, endpoint string, scop return nil, err } if f.Type == serviceAccountKey { - ts, err := selfSignedJWTTokenSource(data, endpoint, audiences) + ts, err := selfSignedJWTTokenSource(data, ds.DefaultAudience, ds.Audiences) if err != nil { return nil, err } @@ -90,9 +90,8 @@ func credentialsFromJSON(ctx context.Context, data []byte, endpoint string, scop return cred, err } -func selfSignedJWTTokenSource(data []byte, endpoint string, audiences []string) (oauth2.TokenSource, error) { - // Use the API endpoint as the default audience - audience := endpoint +func selfSignedJWTTokenSource(data []byte, defaultAudience string, audiences []string) (oauth2.TokenSource, error) { + audience := defaultAudience if len(audiences) > 0 { // TODO(shinfan): Update golang oauth to support multiple audiences. if len(audiences) > 1 { @@ -118,7 +117,7 @@ func QuotaProjectFromCreds(cred *google.Credentials) string { func impersonateCredentials(ctx context.Context, creds *google.Credentials, ds *DialSettings) (*google.Credentials, error) { if len(ds.ImpersonationConfig.Scopes) == 0 { - ds.ImpersonationConfig.Scopes = ds.Scopes + ds.ImpersonationConfig.Scopes = ds.GetScopes() } ts, err := impersonate.TokenSource(ctx, creds.TokenSource, ds.ImpersonationConfig) if err != nil { diff --git a/vendor/google.golang.org/api/internal/settings.go b/vendor/google.golang.org/api/internal/settings.go index 7c0f9292d50..0ae1cb9778d 100644 --- a/vendor/google.golang.org/api/internal/settings.go +++ b/vendor/google.golang.org/api/internal/settings.go @@ -23,6 +23,7 @@ type DialSettings struct { DefaultEndpoint string DefaultMTLSEndpoint string Scopes []string + DefaultScopes []string TokenSource oauth2.TokenSource Credentials *google.Credentials CredentialsFile string // if set, Token Source is ignored. @@ -30,6 +31,7 @@ type DialSettings struct { UserAgent string APIKey string Audiences []string + DefaultAudience string HTTPClient *http.Client GRPCDialOpts []grpc.DialOption GRPCConn *grpc.ClientConn @@ -49,6 +51,15 @@ type DialSettings struct { RequestReason string } +// GetScopes returns the user-provided scopes, if set, or else falls back to the +// default scopes. +func (ds *DialSettings) GetScopes() []string { + if len(ds.Scopes) > 0 { + return ds.Scopes + } + return ds.DefaultScopes +} + // Validate reports an error if ds is invalid. 
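// Editor's aside (not part of the patch): the new DefaultScopes field and
// GetScopes accessor above are why creds.go now calls ds.GetScopes()
// instead of reading ds.Scopes directly — user-provided scopes always win,
// and a generated client's default scopes apply only when the user sets
// none. A minimal sketch of the same fallback with toy types:

package main

import "fmt"

type dialSettings struct {
	Scopes        []string // user-provided, e.g. via option.WithScopes
	DefaultScopes []string // set by generated clients, e.g. via internaloption.WithDefaultScopes
}

func (ds *dialSettings) getScopes() []string {
	if len(ds.Scopes) > 0 {
		return ds.Scopes // explicit user scopes take precedence
	}
	return ds.DefaultScopes // otherwise fall back to the client's defaults
}

func main() {
	ds := &dialSettings{DefaultScopes: []string{"https://www.googleapis.com/auth/cloud-platform"}}
	fmt.Println(ds.getScopes()) // default scope: the user set none
	ds.Scopes = []string{"https://www.googleapis.com/auth/devstorage.read_only"}
	fmt.Println(ds.getScopes()) // user scope wins
}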
func (ds *DialSettings) Validate() error { if ds.SkipValidation { diff --git a/vendor/google.golang.org/api/option/internaloption/internaloption.go b/vendor/google.golang.org/api/option/internaloption/internaloption.go index 6ee4501c048..1fff22fd5da 100644 --- a/vendor/google.golang.org/api/option/internaloption/internaloption.go +++ b/vendor/google.golang.org/api/option/internaloption/internaloption.go @@ -65,3 +65,32 @@ type enableDirectPath bool func (e enableDirectPath) Apply(o *internal.DialSettings) { o.EnableDirectPath = bool(e) } + +// WithDefaultAudience returns a ClientOption that specifies a default audience +// to be used as the audience field ("aud") for the JWT token authentication. +// +// It should only be used internally by generated clients. +func WithDefaultAudience(audience string) option.ClientOption { + return withDefaultAudience(audience) +} + +type withDefaultAudience string + +func (w withDefaultAudience) Apply(o *internal.DialSettings) { + o.DefaultAudience = string(w) +} + +// WithDefaultScopes returns a ClientOption that overrides the default OAuth2 +// scopes to be used for a service. +// +// It should only be used internally by generated clients. +func WithDefaultScopes(scope ...string) option.ClientOption { + return withDefaultScopes(scope) +} + +type withDefaultScopes []string + +func (w withDefaultScopes) Apply(o *internal.DialSettings) { + o.DefaultScopes = make([]string, len(w)) + copy(o.DefaultScopes, w) +} diff --git a/vendor/google.golang.org/api/storage/v1/storage-api.json b/vendor/google.golang.org/api/storage/v1/storage-api.json index 1e076ab66d4..dd283a07281 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-api.json +++ b/vendor/google.golang.org/api/storage/v1/storage-api.json @@ -26,7 +26,7 @@ "description": "Stores and retrieves potentially large, immutable data objects.", "discoveryVersion": "v1", "documentationLink": "https://developers.google.com/storage/docs/json_api/", - "etag": "\"3133373531323239383338313531333236393038\"", + "etag": "\"32313837343738383335383432353737343034\"", "icons": { "x16": "https://www.google.com/images/icons/product/cloud_storage-16.png", "x32": "https://www.google.com/images/icons/product/cloud_storage-32.png" @@ -36,6 +36,7 @@ "labels": [ "labs" ], + "mtlsRootUrl": "https://storage.mtls.googleapis.com/", "name": "storage", "ownerDomain": "google.com", "ownerName": "Google", @@ -3229,7 +3230,7 @@ } } }, - "revision": "20200927", + "revision": "20201112", "rootUrl": "https://storage.googleapis.com/", "schemas": { "Bucket": { @@ -3336,6 +3337,10 @@ }, "type": "object" }, + "publicAccessPrevention": { + "description": "The bucket's Public Access Prevention configuration. Currently, 'unspecified' and 'enforced' are supported.", + "type": "string" + }, "uniformBucketLevelAccess": { "description": "The bucket's uniform bucket-level access configuration.", "properties": { diff --git a/vendor/google.golang.org/api/storage/v1/storage-gen.go b/vendor/google.golang.org/api/storage/v1/storage-gen.go index 9e2483bbd59..10383bb2fae 100644 --- a/vendor/google.golang.org/api/storage/v1/storage-gen.go +++ b/vendor/google.golang.org/api/storage/v1/storage-gen.go @@ -81,6 +81,7 @@ const apiId = "storage:v1" const apiName = "storage" const apiVersion = "v1" const basePath = "https://storage.googleapis.com/storage/v1/" +const mtlsBasePath = "https://storage.mtls.googleapis.com/storage/v1/" // OAuth2 scopes used by this API. 
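// Editor's aside (not part of the patch): WithDefaultAudience and
// WithDefaultScopes above follow the usual ClientOption pattern — each
// option is a small value whose Apply method mutates the dial settings,
// and WithDefaultScopes copies its slice so the caller cannot mutate the
// settings afterwards. A toy equivalent of that pattern:

package main

import "fmt"

type settings struct{ defaultScopes []string }

// clientOption mirrors the option.ClientOption idea: any value that can
// apply itself to the settings.
type clientOption interface{ apply(*settings) }

type withDefaultScopes []string

func (w withDefaultScopes) apply(s *settings) {
	// Defensive copy, mirroring the real WithDefaultScopes.
	s.defaultScopes = make([]string, len(w))
	copy(s.defaultScopes, w)
}

func main() {
	var s settings
	opts := []clientOption{withDefaultScopes{"https://www.googleapis.com/auth/devstorage.full_control"}}
	for _, o := range opts {
		o.apply(&s)
	}
	fmt.Println(s.defaultScopes)
}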
const ( @@ -112,6 +113,7 @@ func NewService(ctx context.Context, opts ...option.ClientOption) (*Service, err // NOTE: prepend, so we don't override user-specified scopes. opts = append([]option.ClientOption{scopesOption}, opts...) opts = append(opts, internaloption.WithDefaultEndpoint(basePath)) + opts = append(opts, internaloption.WithDefaultMTLSEndpoint(mtlsBasePath)) client, endpoint, err := htransport.NewClient(ctx, opts...) if err != nil { return nil, err @@ -540,6 +542,10 @@ type BucketIamConfiguration struct { // disable the feature. BucketPolicyOnly *BucketIamConfigurationBucketPolicyOnly `json:"bucketPolicyOnly,omitempty"` + // PublicAccessPrevention: The bucket's Public Access Prevention + // configuration. Currently, 'unspecified' and 'enforced' are supported. + PublicAccessPrevention string `json:"publicAccessPrevention,omitempty"` + // UniformBucketLevelAccess: The bucket's uniform bucket-level access // configuration. UniformBucketLevelAccess *BucketIamConfigurationUniformBucketLevelAccess `json:"uniformBucketLevelAccess,omitempty"` @@ -2440,7 +2446,7 @@ func (c *BucketAccessControlsDeleteCall) Header() http.Header { func (c *BucketAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2588,7 +2594,7 @@ func (c *BucketAccessControlsGetCall) Header() http.Header { func (c *BucketAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2755,7 +2761,7 @@ func (c *BucketAccessControlsInsertCall) Header() http.Header { func (c *BucketAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -2928,7 +2934,7 @@ func (c *BucketAccessControlsListCall) Header() http.Header { func (c *BucketAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3089,7 +3095,7 @@ func (c *BucketAccessControlsPatchCall) Header() http.Header { func (c *BucketAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3263,7 +3269,7 @@ func (c *BucketAccessControlsUpdateCall) Header() http.Header { func (c *BucketAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", 
"gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3449,7 +3455,7 @@ func (c *BucketsDeleteCall) Header() http.Header { func (c *BucketsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3628,7 +3634,7 @@ func (c *BucketsGetCall) Header() http.Header { func (c *BucketsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -3834,7 +3840,7 @@ func (c *BucketsGetIamPolicyCall) Header() http.Header { func (c *BucketsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4051,7 +4057,7 @@ func (c *BucketsInsertCall) Header() http.Header { func (c *BucketsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4308,7 +4314,7 @@ func (c *BucketsListCall) Header() http.Header { func (c *BucketsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4518,7 +4524,7 @@ func (c *BucketsLockRetentionPolicyCall) Header() http.Header { func (c *BucketsLockRetentionPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4753,7 +4759,7 @@ func (c *BucketsPatchCall) Header() http.Header { func (c *BucketsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -4982,7 +4988,7 @@ func (c *BucketsSetIamPolicyCall) Header() http.Header { func (c *BucketsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5157,7 +5163,7 @@ func (c *BucketsTestIamPermissionsCall) Header() http.Header { func (c *BucketsTestIamPermissionsCall) doRequest(alt string) 
(*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5397,7 +5403,7 @@ func (c *BucketsUpdateCall) Header() http.Header { func (c *BucketsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5609,7 +5615,7 @@ func (c *ChannelsStopCall) Header() http.Header { func (c *ChannelsStopCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5726,7 +5732,7 @@ func (c *DefaultObjectAccessControlsDeleteCall) Header() http.Header { func (c *DefaultObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -5874,7 +5880,7 @@ func (c *DefaultObjectAccessControlsGetCall) Header() http.Header { func (c *DefaultObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6042,7 +6048,7 @@ func (c *DefaultObjectAccessControlsInsertCall) Header() http.Header { func (c *DefaultObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6232,7 +6238,7 @@ func (c *DefaultObjectAccessControlsListCall) Header() http.Header { func (c *DefaultObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6405,7 +6411,7 @@ func (c *DefaultObjectAccessControlsPatchCall) Header() http.Header { func (c *DefaultObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6579,7 +6585,7 @@ func (c *DefaultObjectAccessControlsUpdateCall) Header() http.Header { func (c *DefaultObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - 
reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6751,7 +6757,7 @@ func (c *NotificationsDeleteCall) Header() http.Header { func (c *NotificationsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -6899,7 +6905,7 @@ func (c *NotificationsGetCall) Header() http.Header { func (c *NotificationsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7069,7 +7075,7 @@ func (c *NotificationsInsertCall) Header() http.Header { func (c *NotificationsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7244,7 +7250,7 @@ func (c *NotificationsListCall) Header() http.Header { func (c *NotificationsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7417,7 +7423,7 @@ func (c *ObjectAccessControlsDeleteCall) Header() http.Header { func (c *ObjectAccessControlsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7589,7 +7595,7 @@ func (c *ObjectAccessControlsGetCall) Header() http.Header { func (c *ObjectAccessControlsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7780,7 +7786,7 @@ func (c *ObjectAccessControlsInsertCall) Header() http.Header { func (c *ObjectAccessControlsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -7977,7 +7983,7 @@ func (c *ObjectAccessControlsListCall) Header() http.Header { func (c *ObjectAccessControlsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" 
gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8162,7 +8168,7 @@ func (c *ObjectAccessControlsPatchCall) Header() http.Header { func (c *ObjectAccessControlsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8360,7 +8366,7 @@ func (c *ObjectAccessControlsUpdateCall) Header() http.Header { func (c *ObjectAccessControlsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8597,7 +8603,7 @@ func (c *ObjectsComposeCall) Header() http.Header { func (c *ObjectsComposeCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -8940,7 +8946,7 @@ func (c *ObjectsCopyCall) Header() http.Header { func (c *ObjectsCopyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9268,7 +9274,7 @@ func (c *ObjectsDeleteCall) Header() http.Header { func (c *ObjectsDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9501,7 +9507,7 @@ func (c *ObjectsGetCall) Header() http.Header { func (c *ObjectsGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -9751,7 +9757,7 @@ func (c *ObjectsGetIamPolicyCall) Header() http.Header { func (c *ObjectsGetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10068,7 +10074,7 @@ func (c *ObjectsInsertCall) Header() http.Header { func (c *ObjectsInsertCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10441,7 +10447,7 @@ func (c *ObjectsListCall) Header() http.Header { func (c *ObjectsListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := 
make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -10758,7 +10764,7 @@ func (c *ObjectsPatchCall) Header() http.Header { func (c *ObjectsPatchCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11150,7 +11156,7 @@ func (c *ObjectsRewriteCall) Header() http.Header { func (c *ObjectsRewriteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11453,7 +11459,7 @@ func (c *ObjectsSetIamPolicyCall) Header() http.Header { func (c *ObjectsSetIamPolicyCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11653,7 +11659,7 @@ func (c *ObjectsTestIamPermissionsCall) Header() http.Header { func (c *ObjectsTestIamPermissionsCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -11914,7 +11920,7 @@ func (c *ObjectsUpdateCall) Header() http.Header { func (c *ObjectsUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12232,7 +12238,7 @@ func (c *ObjectsWatchAllCall) Header() http.Header { func (c *ObjectsWatchAllCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12448,7 +12454,7 @@ func (c *ProjectsHmacKeysCreateCall) Header() http.Header { func (c *ProjectsHmacKeysCreateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12598,7 +12604,7 @@ func (c *ProjectsHmacKeysDeleteCall) Header() http.Header { func (c *ProjectsHmacKeysDeleteCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := 
range c.header_ { reqHeaders[k] = v } @@ -12733,7 +12739,7 @@ func (c *ProjectsHmacKeysGetCall) Header() http.Header { func (c *ProjectsHmacKeysGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -12933,7 +12939,7 @@ func (c *ProjectsHmacKeysListCall) Header() http.Header { func (c *ProjectsHmacKeysListCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13128,7 +13134,7 @@ func (c *ProjectsHmacKeysUpdateCall) Header() http.Header { func (c *ProjectsHmacKeysUpdateCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } @@ -13305,7 +13311,7 @@ func (c *ProjectsServiceAccountGetCall) Header() http.Header { func (c *ProjectsServiceAccountGetCall) doRequest(alt string) (*http.Response, error) { reqHeaders := make(http.Header) - reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201105") + reqHeaders.Set("x-goog-api-client", "gl-go/"+gensupport.GoVersion()+" gdcl/20201124") for k, v := range c.header_ { reqHeaders[k] = v } diff --git a/vendor/google.golang.org/api/transport/cert/default_cert.go b/vendor/google.golang.org/api/transport/cert/default_cert.go index c03af65fd73..141ae457936 100644 --- a/vendor/google.golang.org/api/transport/cert/default_cert.go +++ b/vendor/google.golang.org/api/transport/cert/default_cert.go @@ -14,6 +14,7 @@ package cert import ( "crypto/tls" + "crypto/x509" "encoding/json" "errors" "fmt" @@ -23,6 +24,7 @@ import ( "os/user" "path/filepath" "sync" + "time" ) const ( @@ -30,10 +32,18 @@ const ( metadataFile = "context_aware_metadata.json" ) +// defaultCertData holds all the variables pertaining to +// the default certificate source created by DefaultSource. +type defaultCertData struct { + once sync.Once + source Source + err error + cachedCertMutex sync.Mutex + cachedCert *tls.Certificate +} + var ( - defaultSourceOnce sync.Once - defaultSource Source - defaultSourceErr error + defaultCert defaultCertData ) // Source is a function that can be passed into crypto/tls.Config.GetClientCertificate. @@ -44,10 +54,10 @@ type Source func(*tls.CertificateRequestInfo) (*tls.Certificate, error) // // If that file does not exist, a nil source is returned. func DefaultSource() (Source, error) { - defaultSourceOnce.Do(func() { - defaultSource, defaultSourceErr = newSecureConnectSource() + defaultCert.once.Do(func() { + defaultCert.source, defaultCert.err = newSecureConnectSource() }) - return defaultSource, defaultSourceErr + return defaultCert.source, defaultCert.err } type secureConnectSource struct { @@ -95,7 +105,11 @@ func validateMetadata(metadata secureConnectMetadata) error { } func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestInfo) (*tls.Certificate, error) { - // TODO(cbro): consider caching valid certificates rather than exec'ing every time. 
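// Editor's aside (not part of the patch): the hunk below replaces the old
// TODO with an actual cache — the exec'd SecureConnect client certificate
// is memoized under a mutex and re-fetched only once its leaf certificate
// has expired. A self-contained sketch of that pattern, with a stub
// fetcher standing in for the exec call:

package main

import (
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"sync"
	"time"
)

type certCache struct {
	mu   sync.Mutex
	cert *tls.Certificate
}

// get returns the cached certificate, calling fetch (the expensive path,
// e.g. exec'ing the provider command) only on a miss or after expiry.
func (c *certCache) get(fetch func() (*tls.Certificate, error)) (*tls.Certificate, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.cert != nil && !expired(c.cert) {
		return c.cert, nil
	}
	cert, err := fetch()
	if err != nil {
		return nil, err
	}
	c.cert = cert
	return cert, nil
}

// expired reports whether the leaf certificate is missing, unparsable, or
// past its NotAfter time — the same test as isCertificateExpired below.
func expired(cert *tls.Certificate) bool {
	if len(cert.Certificate) == 0 {
		return true
	}
	leaf, err := x509.ParseCertificate(cert.Certificate[0])
	if err != nil {
		return true
	}
	return time.Now().After(leaf.NotAfter)
}

func main() {
	var cache certCache
	fetch := func() (*tls.Certificate, error) { return &tls.Certificate{}, nil }
	cert, _ := cache.get(fetch)
	// An empty tls.Certificate has no leaf, so it counts as expired and a
	// later get would fetch again; a real, valid certificate is reused.
	fmt.Println(len(cert.Certificate))
}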
+ defaultCert.cachedCertMutex.Lock() + defer defaultCert.cachedCertMutex.Unlock() + if defaultCert.cachedCert != nil && !isCertificateExpired(defaultCert.cachedCert) { + return defaultCert.cachedCert, nil + } command := s.metadata.Cmd data, err := exec.Command(command[0], command[1:]...).Output() if err != nil { @@ -106,5 +120,18 @@ func (s *secureConnectSource) getClientCertificate(info *tls.CertificateRequestI if err != nil { return nil, err } + defaultCert.cachedCert = &cert return &cert, nil } + +// isCertificateExpired returns true if the given cert is expired or invalid. +func isCertificateExpired(cert *tls.Certificate) bool { + if len(cert.Certificate) == 0 { + return true + } + parsed, err := x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return true + } + return time.Now().After(parsed.NotAfter) +} diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go index 5cedf042c32..ca78f7881f9 100644 --- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_instance_admin.pb.go @@ -1,4 +1,4 @@ -// Copyright 2019 Google LLC. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -11,7 +11,6 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. -// // Code generated by protoc-gen-go. DO NOT EDIT. // versions: @@ -1356,7 +1355,7 @@ type DeleteAppProfileRequest struct { // Required. The unique name of the app profile to be deleted. Values are of the form // `projects/{project}/instances/{instance}/appProfiles/{app_profile}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // If true, ignore safety checks when deleting the app profile. + // Required. If true, ignore safety checks when deleting the app profile. 
IgnoreWarnings bool `protobuf:"varint,2,opt,name=ignore_warnings,json=ignoreWarnings,proto3" json:"ignore_warnings,omitempty"` } @@ -1697,273 +1696,273 @@ var file_google_bigtable_admin_v2_bigtable_instance_admin_proto_rawDesc = []byte 0x74, 0x65, 0x4d, 0x61, 0x73, 0x6b, 0x12, 0x27, 0x0a, 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x22, - 0x82, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, + 0x87, 0x01, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3e, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x2a, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x24, 0x0a, 0x22, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x41, 0x70, 0x70, 0x50, 0x72, - 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x69, + 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x2c, 0x0a, 0x0f, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x5f, 0x77, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, 0x65, 0x57, 0x61, 0x72, 0x6e, - 0x69, 0x6e, 0x67, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x70, - 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x32, 0x92, 0x1e, 0x0a, 0x15, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0xda, 0x01, 0x0a, 0x0e, 0x43, - 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, - 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x78, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x22, 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x24, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x69, 0x64, - 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x73, 0xca, 0x41, 0x22, 0x0a, 0x08, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, - 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, - 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x91, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x49, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x20, 0x01, 0x28, 0x08, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0e, 0x69, 0x67, 0x6e, 0x6f, 0x72, + 0x65, 0x57, 0x61, 0x72, 0x6e, 0x69, 0x6e, 0x67, 0x73, 0x22, 0x1a, 0x0a, 0x18, 0x55, 0x70, 0x64, + 
0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, + 0x61, 0x64, 0x61, 0x74, 0x61, 0x32, 0x92, 0x1e, 0x0a, 0x15, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, + 0xda, 0x01, 0x0a, 0x0e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, + 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x22, 0x78, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x22, 0x21, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x3a, 0x01, 0x2a, + 0xda, 0x41, 0x24, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2c, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0xca, 0x41, 0x22, 0x0a, 0x08, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x12, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x91, 0x01, 0x0a, + 0x0b, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2c, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x30, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0xa4, 0x01, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x32, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0xda, 0x41, + 0x06, 0x70, 0x61, 0x72, 
0x65, 0x6e, 0x74, 0x12, 0x86, 0x01, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x1a, 0x22, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x22, 0x2c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x1a, 0x21, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x01, 0x2a, + 0x12, 0xe8, 0x01, 0x0a, 0x15, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, + 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, + 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, + 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x22, 0x78, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x36, 0x32, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, + 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0xda, + 0x41, 0x14, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, + 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0xca, 0x41, 0x22, 0x0a, 0x08, 0x49, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x12, 0x16, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x8b, 0x01, 0x0a, 0x0e, + 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2f, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, + 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, + 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x2a, + 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xdc, 0x01, 0x0a, 0x0d, 0x43, 0x72, + 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 
0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, + 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x7c, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x37, 0x22, 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, + 0x3a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0xda, 0x41, 0x19, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0xca, 0x41, 0x20, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x12, 0x15, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x99, 0x01, 0x0a, 0x0a, 0x47, 0x65, 0x74, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, + 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xac, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, + 0x73, 0x74, 0x65, 0x72, 0x73, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x23, 0x12, 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xa4, 0x01, 0x0a, 0x0d, - 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x2e, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x49, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x12, 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 
0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x12, 0x86, 0x01, 0x0a, 0x0e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, + 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, + 0x65, 0x6e, 0x74, 0x12, 0xad, 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x2c, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x1a, 0x21, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0x12, 0xe8, 0x01, 0x0a, 0x15, - 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x36, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, + 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x1a, + 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x01, 0x2a, + 0xca, 0x41, 0x20, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x15, 0x55, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x94, 0x01, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x50, 0x61, 0x72, 0x74, 0x69, 0x61, 0x6c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, - 0x67, 
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, - 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x78, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x36, 0x32, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x69, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x3b, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x2a, 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, + 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, + 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xd5, 0x01, 0x0a, 0x10, 0x43, + 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, + 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, + 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, + 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x68, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, + 0x22, 0x2f, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, + 0x73, 0x3a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0xda, 0x41, + 0x21, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, + 0x6c, 0x65, 0x12, 0xa5, 0x01, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, + 0x66, 0x69, 0x6c, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x3e, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x31, 0x12, 0x2f, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, + 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, + 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb8, 0x01, 0x0a, 0x0f, 0x4c, + 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x30, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 
0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, + 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x22, 0x40, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x12, 0x2f, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, - 0x3a, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0xda, 0x41, 0x14, 0x69, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, - 0x6b, 0xca, 0x41, 0x22, 0x0a, 0x08, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x16, - 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4d, 0x65, - 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x8b, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, - 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, + 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0xda, 0x41, 0x06, 0x70, + 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xfa, 0x01, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, + 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, + 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, + 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x93, 0x01, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x4a, 0x32, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x70, 0x70, 0x5f, + 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x0b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0xda, + 0x41, 0x17, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2c, 0x75, 0x70, + 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0xca, 0x41, 0x26, 0x0a, 0x0a, 0x41, 0x70, + 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, + 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, + 0x74, 0x61, 0x12, 0x9d, 0x01, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x70, + 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x31, 
0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, + 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x23, 0x2a, 0x21, 0x2f, 0x76, 0x32, 0x2f, + 0x74, 0x79, 0x22, 0x3e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x2a, 0x2f, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xdc, 0x01, 0x0a, 0x0d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x7c, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x2c, 0x2f, - 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, - 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x7d, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x3a, 0x07, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0xda, 0x41, 0x19, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x63, 0x6c, - 0x75, 0x73, 0x74, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0xca, 0x41, 0x20, 0x0a, 0x07, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x15, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0x99, 0x01, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, - 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, 0x74, - 0x65, 0x72, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x32, 0x2f, - 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0xac, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, - 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, 0x1a, - 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x3d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, 0x12, 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xad, - 0x01, 0x0a, 0x0d, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x12, 0x21, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, - 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x22, 0x5a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x1a, 0x2c, 0x2f, 0x76, 0x32, 0x2f, - 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x01, 0x2a, 0xca, 0x41, 0x20, 0x0a, 0x07, - 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x12, 0x15, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x43, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x94, - 0x01, 0x0a, 0x0d, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, - 0x74, 0x65, 0x43, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2e, - 0x2a, 0x2c, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x70, 0x70, + 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x93, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, + 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, + 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x48, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, + 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, + 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x08, + 0x72, 0x65, 
0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x9a, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, + 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x22, 0x4f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, + 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, - 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xd5, 0x01, 0x0a, 0x10, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x22, 0x68, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3e, 0x22, 0x2f, 0x2f, 0x76, 0x32, - 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xc5, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, + 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, + 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x22, 0x5a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3d, 0x22, 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, - 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x3a, 0x0b, 0x61, 0x70, - 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0xda, 0x41, 0x21, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x5f, 0x69, - 0x64, 0x2c, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0xa5, 0x01, - 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, - 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 
0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x70, - 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x41, 0x70, 0x70, 0x50, 0x72, - 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x3e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x12, 0x2f, 0x2f, - 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, - 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, - 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb8, 0x01, 0x0a, 0x0f, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, - 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x12, 0x30, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x31, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x70, 0x70, 0x50, 0x72, - 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x40, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x12, 0x2f, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, - 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x61, 0x70, 0x70, 0x50, - 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, - 0x12, 0xfa, 0x01, 0x0a, 0x10, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, - 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x93, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x4a, - 0x32, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x61, 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, - 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x0b, 0x61, - 0x70, 0x70, 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0xda, 0x41, 0x17, 0x61, 0x70, 0x70, - 0x5f, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, - 0x6d, 0x61, 0x73, 0x6b, 0xca, 0x41, 0x26, 0x0a, 0x0a, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, - 0x69, 0x6c, 0x65, 0x12, 0x18, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, - 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x9d, 0x01, - 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x70, 
0x50, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, - 0x6c, 0x65, 0x74, 0x65, 0x41, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x3e, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x31, 0x2a, 0x2f, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x61, 0x70, 0x70, 0x50, 0x72, 0x6f, 0x66, 0x69, - 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x93, 0x01, - 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, - 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, - 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x48, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x37, 0x22, 0x32, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, - 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, - 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, - 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x12, 0x9a, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, - 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, - 0x4f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, - 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, - 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, - 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0xc5, 0x01, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, - 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, - 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, - 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5a, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x3d, 0x22, 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 
0x75, - 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, - 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, - 0x2a, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x9a, 0x03, 0xca, 0x41, 0x1c, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0xf7, 0x02, 0x68, 0x74, - 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2c, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x63, 0x6c, 0x75, 0x73, - 0x74, 0x65, 0x72, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, - 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, - 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, - 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, - 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x9a, 0x03, + 0xca, 0x41, 0x1c, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, + 0x41, 0xf7, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, + 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, + 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, + 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 
0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2c, 0x68, 0x74, - 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, - 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, - 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, - 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, - 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, - 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xe2, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x1a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, - 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, - 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, - 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, - 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, - 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, - 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, - 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, + 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x63, 0x6c, 0x75, 0x73, 0x74, + 0x65, 0x72, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, + 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, + 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, + 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, + 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xe2, 0x01, 0x0a, 0x1c, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 
0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x1a, 0x42, 0x69, 0x67,
+	0x74, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x41, 0x64, 0x6d,
+	0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73,
+	0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f,
+	0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c,
+	0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65,
+	0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67,
+	0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c,
+	0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74,
+	0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62,
+	0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
 }
 
 var (
diff --git a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go
index f4986711024..afea80bd17e 100644
--- a/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go
+++ b/vendor/google.golang.org/genproto/googleapis/bigtable/admin/v2/bigtable_table_admin.pb.go
@@ -51,43 +51,31 @@ const (
 // of the legacy proto package is being used.
 const _ = proto.ProtoPackageIsVersion4
 
-// Request message for
-// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable]
-type CreateTableRequest struct {
+// The request for
+// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable].
+type RestoreTableRequest struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields
 
-	// Required. The unique name of the instance in which to create the table.
-	// Values are of the form `projects/{project}/instances/{instance}`.
+	// Required. The name of the instance in which to create the restored
+	// table. This instance must be the parent of the source backup. Values are
+	// of the form `projects/<project>/instances/<instance>`.
 	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
-	// Required. The name by which the new table should be referred to within the
-	// parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`.
-	// Maximum 50 characters.
+	// Required. The id of the table to create and restore to. This
+	// table must not already exist. The `table_id` appended to
+	// `parent` forms the full table name of the form
+	// `projects/<project>/instances/<instance>/tables/<table_id>`.
 	TableId string `protobuf:"bytes,2,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
-	// Required. The Table to create.
-	Table *Table `protobuf:"bytes,3,opt,name=table,proto3" json:"table,omitempty"`
-	// The optional list of row keys that will be used to initially split the
-	// table into several tablets (tablets are similar to HBase regions).
- // Given two split keys, `s1` and `s2`, three tablets will be created, - // spanning the key ranges: `[, s1), [s1, s2), [s2, )`. - // - // Example: + // Required. The source from which to restore. // - // * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` - // `"other", "zz"]` - // * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` - // * Key assignment: - // - Tablet 1 `[, apple) => {"a"}.` - // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` - // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` - // - Tablet 4 `[customer_2, other) => {"customer_2"}.` - // - Tablet 5 `[other, ) => {"other", "zz"}.` - InitialSplits []*CreateTableRequest_Split `protobuf:"bytes,4,rep,name=initial_splits,json=initialSplits,proto3" json:"initial_splits,omitempty"` + // Types that are assignable to Source: + // *RestoreTableRequest_Backup + Source isRestoreTableRequest_Source `protobuf_oneof:"source"` } -func (x *CreateTableRequest) Reset() { - *x = CreateTableRequest{} +func (x *RestoreTableRequest) Reset() { + *x = RestoreTableRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -95,13 +83,13 @@ func (x *CreateTableRequest) Reset() { } } -func (x *CreateTableRequest) String() string { +func (x *RestoreTableRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateTableRequest) ProtoMessage() {} +func (*RestoreTableRequest) ProtoMessage() {} -func (x *CreateTableRequest) ProtoReflect() protoreflect.Message { +func (x *RestoreTableRequest) ProtoReflect() protoreflect.Message { mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -113,66 +101,85 @@ func (x *CreateTableRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateTableRequest.ProtoReflect.Descriptor instead. -func (*CreateTableRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use RestoreTableRequest.ProtoReflect.Descriptor instead. +func (*RestoreTableRequest) Descriptor() ([]byte, []int) { return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{0} } -func (x *CreateTableRequest) GetParent() string { +func (x *RestoreTableRequest) GetParent() string { if x != nil { return x.Parent } return "" } -func (x *CreateTableRequest) GetTableId() string { +func (x *RestoreTableRequest) GetTableId() string { if x != nil { return x.TableId } return "" } -func (x *CreateTableRequest) GetTable() *Table { - if x != nil { - return x.Table +func (m *RestoreTableRequest) GetSource() isRestoreTableRequest_Source { + if m != nil { + return m.Source } return nil } -func (x *CreateTableRequest) GetInitialSplits() []*CreateTableRequest_Split { - if x != nil { - return x.InitialSplits +func (x *RestoreTableRequest) GetBackup() string { + if x, ok := x.GetSource().(*RestoreTableRequest_Backup); ok { + return x.Backup } - return nil + return "" } -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] -// -// Note: This is a private alpha release of Cloud Bigtable snapshots. This -// feature is not currently available to most Cloud Bigtable customers. This -// feature might be changed in backward-incompatible ways and is not recommended -// for production use. 
It is not subject to any SLA or deprecation policy.
-type CreateTableFromSnapshotRequest struct {
+type isRestoreTableRequest_Source interface {
+	isRestoreTableRequest_Source()
+}
+
+type RestoreTableRequest_Backup struct {
+	// Name of the backup from which to restore. Values are of the form
+	// `projects/<project>/instances/<instance>/clusters/<cluster>/backups/<backup>`.
+	Backup string `protobuf:"bytes,3,opt,name=backup,proto3,oneof"`
+}
+
+func (*RestoreTableRequest_Backup) isRestoreTableRequest_Source() {}
+
+// Metadata type for the long-running operation returned by
+// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable].
+type RestoreTableMetadata struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields
 
-	// Required. The unique name of the instance in which to create the table.
-	// Values are of the form `projects/{project}/instances/{instance}`.
-	Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"`
-	// Required. The name by which the new table should be referred to within the
-	// parent instance, e.g., `foobar` rather than `{parent}/tables/foobar`.
-	TableId string `protobuf:"bytes,2,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
-	// Required. The unique name of the snapshot from which to restore the table.
-	// The snapshot and the table must be in the same instance. Values are of the
-	// form
-	// `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`.
-	SourceSnapshot string `protobuf:"bytes,3,opt,name=source_snapshot,json=sourceSnapshot,proto3" json:"source_snapshot,omitempty"`
+	// Name of the table being created and restored to.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// The type of the restore source.
+	SourceType RestoreSourceType `protobuf:"varint,2,opt,name=source_type,json=sourceType,proto3,enum=google.bigtable.admin.v2.RestoreSourceType" json:"source_type,omitempty"`
+	// Information about the source used to restore the table, as specified by
+	// `source` in [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest].
+	//
+	// Types that are assignable to SourceInfo:
+	//	*RestoreTableMetadata_BackupInfo
+	SourceInfo isRestoreTableMetadata_SourceInfo `protobuf_oneof:"source_info"`
+	// If exists, the name of the long-running operation that will be used to
+	// track the post-restore optimization process to optimize the performance of
+	// the restored table. The metadata type of the long-running operation is
+	// [OptimizeRestoreTableMetadata][]. The response type is
+	// [Empty][google.protobuf.Empty]. This long-running operation may be
+	// automatically created by the system if applicable after the
+	// RestoreTable long-running operation completes successfully. This operation
+	// may not be created if the table is already optimized or the restore was
+	// not successful.
+	OptimizeTableOperationName string `protobuf:"bytes,4,opt,name=optimize_table_operation_name,json=optimizeTableOperationName,proto3" json:"optimize_table_operation_name,omitempty"`
+	// The progress of the [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]
+	// operation.
+ Progress *OperationProgress `protobuf:"bytes,5,opt,name=progress,proto3" json:"progress,omitempty"` } -func (x *CreateTableFromSnapshotRequest) Reset() { - *x = CreateTableFromSnapshotRequest{} +func (x *RestoreTableMetadata) Reset() { + *x = RestoreTableMetadata{} if protoimpl.UnsafeEnabled { mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -180,13 +187,13 @@ func (x *CreateTableFromSnapshotRequest) Reset() { } } -func (x *CreateTableFromSnapshotRequest) String() string { +func (x *RestoreTableMetadata) String() string { return protoimpl.X.MessageStringOf(x) } -func (*CreateTableFromSnapshotRequest) ProtoMessage() {} +func (*RestoreTableMetadata) ProtoMessage() {} -func (x *CreateTableFromSnapshotRequest) ProtoReflect() protoreflect.Message { +func (x *RestoreTableMetadata) ProtoReflect() protoreflect.Message { mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -198,53 +205,80 @@ func (x *CreateTableFromSnapshotRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use CreateTableFromSnapshotRequest.ProtoReflect.Descriptor instead. -func (*CreateTableFromSnapshotRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use RestoreTableMetadata.ProtoReflect.Descriptor instead. +func (*RestoreTableMetadata) Descriptor() ([]byte, []int) { return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{1} } -func (x *CreateTableFromSnapshotRequest) GetParent() string { +func (x *RestoreTableMetadata) GetName() string { if x != nil { - return x.Parent + return x.Name } return "" } -func (x *CreateTableFromSnapshotRequest) GetTableId() string { +func (x *RestoreTableMetadata) GetSourceType() RestoreSourceType { if x != nil { - return x.TableId + return x.SourceType } - return "" + return RestoreSourceType_RESTORE_SOURCE_TYPE_UNSPECIFIED } -func (x *CreateTableFromSnapshotRequest) GetSourceSnapshot() string { +func (m *RestoreTableMetadata) GetSourceInfo() isRestoreTableMetadata_SourceInfo { + if m != nil { + return m.SourceInfo + } + return nil +} + +func (x *RestoreTableMetadata) GetBackupInfo() *BackupInfo { + if x, ok := x.GetSourceInfo().(*RestoreTableMetadata_BackupInfo); ok { + return x.BackupInfo + } + return nil +} + +func (x *RestoreTableMetadata) GetOptimizeTableOperationName() string { if x != nil { - return x.SourceSnapshot + return x.OptimizeTableOperationName } return "" } -// Request message for -// [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] -type DropRowRangeRequest struct { +func (x *RestoreTableMetadata) GetProgress() *OperationProgress { + if x != nil { + return x.Progress + } + return nil +} + +type isRestoreTableMetadata_SourceInfo interface { + isRestoreTableMetadata_SourceInfo() +} + +type RestoreTableMetadata_BackupInfo struct { + BackupInfo *BackupInfo `protobuf:"bytes,3,opt,name=backup_info,json=backupInfo,proto3,oneof"` +} + +func (*RestoreTableMetadata_BackupInfo) isRestoreTableMetadata_SourceInfo() {} + +// Metadata type for the long-running operation used to track the progress +// of optimizations performed on a newly restored table. This long-running +// operation is automatically created by the system after the successful +// completion of a table restore, and cannot be cancelled. 
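// A minimal sketch of driving the restore flow defined above, assuming this
// package is imported as adminpb, `client` is the BigtableTableAdminClient
// generated from the same proto, and `ctx` is a context.Context; the resource
// names are hypothetical.
//
//	req := &adminpb.RestoreTableRequest{
//		Parent:  "projects/my-project/instances/my-instance",
//		TableId: "restored-table",
//		// The oneof source is populated by assigning one of its wrapper structs.
//		Source: &adminpb.RestoreTableRequest_Backup{
//			Backup: "projects/my-project/instances/my-instance/clusters/my-cluster/backups/my-backup",
//		},
//	}
//	op, err := client.RestoreTable(ctx, req) // long-running; metadata is RestoreTableMetadata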
+type OptimizeRestoredTableMetadata struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the table on which to drop a range of rows. - // Values are of the form - // `projects/{project}/instances/{instance}/tables/{table}`. + // Name of the restored table being optimized. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Delete all rows or by prefix. - // - // Types that are assignable to Target: - // *DropRowRangeRequest_RowKeyPrefix - // *DropRowRangeRequest_DeleteAllDataFromTable - Target isDropRowRangeRequest_Target `protobuf_oneof:"target"` + // The progress of the post-restore optimizations. + Progress *OperationProgress `protobuf:"bytes,2,opt,name=progress,proto3" json:"progress,omitempty"` } -func (x *DropRowRangeRequest) Reset() { - *x = DropRowRangeRequest{} +func (x *OptimizeRestoredTableMetadata) Reset() { + *x = OptimizeRestoredTableMetadata{} if protoimpl.UnsafeEnabled { mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -252,13 +286,13 @@ func (x *DropRowRangeRequest) Reset() { } } -func (x *DropRowRangeRequest) String() string { +func (x *OptimizeRestoredTableMetadata) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DropRowRangeRequest) ProtoMessage() {} +func (*OptimizeRestoredTableMetadata) ProtoMessage() {} -func (x *DropRowRangeRequest) ProtoReflect() protoreflect.Message { +func (x *OptimizeRestoredTableMetadata) ProtoReflect() protoreflect.Message { mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -270,30 +304,268 @@ func (x *DropRowRangeRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DropRowRangeRequest.ProtoReflect.Descriptor instead. -func (*DropRowRangeRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use OptimizeRestoredTableMetadata.ProtoReflect.Descriptor instead. +func (*OptimizeRestoredTableMetadata) Descriptor() ([]byte, []int) { return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{2} } -func (x *DropRowRangeRequest) GetName() string { +func (x *OptimizeRestoredTableMetadata) GetName() string { if x != nil { return x.Name } return "" } -func (m *DropRowRangeRequest) GetTarget() isDropRowRangeRequest_Target { - if m != nil { - return m.Target +func (x *OptimizeRestoredTableMetadata) GetProgress() *OperationProgress { + if x != nil { + return x.Progress } return nil } -func (x *DropRowRangeRequest) GetRowKeyPrefix() []byte { - if x, ok := x.GetTarget().(*DropRowRangeRequest_RowKeyPrefix); ok { - return x.RowKeyPrefix - } - return nil +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTable][google.bigtable.admin.v2.BigtableTableAdmin.CreateTable] +type CreateTableRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The unique name of the instance in which to create the table. + // Values are of the form `projects/{project}/instances/{instance}`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. The name by which the new table should be referred to within the parent + // instance, e.g., `foobar` rather than `{parent}/tables/foobar`. + // Maximum 50 characters. 
+ TableId string `protobuf:"bytes,2,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"` + // Required. The Table to create. + Table *Table `protobuf:"bytes,3,opt,name=table,proto3" json:"table,omitempty"` + // The optional list of row keys that will be used to initially split the + // table into several tablets (tablets are similar to HBase regions). + // Given two split keys, `s1` and `s2`, three tablets will be created, + // spanning the key ranges: `[, s1), [s1, s2), [s2, )`. + // + // Example: + // + // * Row keys := `["a", "apple", "custom", "customer_1", "customer_2",` + // `"other", "zz"]` + // * initial_split_keys := `["apple", "customer_1", "customer_2", "other"]` + // * Key assignment: + // - Tablet 1 `[, apple) => {"a"}.` + // - Tablet 2 `[apple, customer_1) => {"apple", "custom"}.` + // - Tablet 3 `[customer_1, customer_2) => {"customer_1"}.` + // - Tablet 4 `[customer_2, other) => {"customer_2"}.` + // - Tablet 5 `[other, ) => {"other", "zz"}.` + InitialSplits []*CreateTableRequest_Split `protobuf:"bytes,4,rep,name=initial_splits,json=initialSplits,proto3" json:"initial_splits,omitempty"` +} + +func (x *CreateTableRequest) Reset() { + *x = CreateTableRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTableRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTableRequest) ProtoMessage() {} + +func (x *CreateTableRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTableRequest.ProtoReflect.Descriptor instead. +func (*CreateTableRequest) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{3} +} + +func (x *CreateTableRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateTableRequest) GetTableId() string { + if x != nil { + return x.TableId + } + return "" +} + +func (x *CreateTableRequest) GetTable() *Table { + if x != nil { + return x.Table + } + return nil +} + +func (x *CreateTableRequest) GetInitialSplits() []*CreateTableRequest_Split { + if x != nil { + return x.InitialSplits + } + return nil +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot][google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot] +// +// Note: This is a private alpha release of Cloud Bigtable snapshots. This +// feature is not currently available to most Cloud Bigtable customers. This +// feature might be changed in backward-incompatible ways and is not recommended +// for production use. It is not subject to any SLA or deprecation policy. +type CreateTableFromSnapshotRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The unique name of the instance in which to create the table. + // Values are of the form `projects/{project}/instances/{instance}`. + Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` + // Required. 
The name by which the new table should be referred to within the parent + // instance, e.g., `foobar` rather than `{parent}/tables/foobar`. + TableId string `protobuf:"bytes,2,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"` + // Required. The unique name of the snapshot from which to restore the table. The + // snapshot and the table must be in the same instance. + // Values are of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/{snapshot}`. + SourceSnapshot string `protobuf:"bytes,3,opt,name=source_snapshot,json=sourceSnapshot,proto3" json:"source_snapshot,omitempty"` +} + +func (x *CreateTableFromSnapshotRequest) Reset() { + *x = CreateTableFromSnapshotRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *CreateTableFromSnapshotRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateTableFromSnapshotRequest) ProtoMessage() {} + +func (x *CreateTableFromSnapshotRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateTableFromSnapshotRequest.ProtoReflect.Descriptor instead. +func (*CreateTableFromSnapshotRequest) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{4} +} + +func (x *CreateTableFromSnapshotRequest) GetParent() string { + if x != nil { + return x.Parent + } + return "" +} + +func (x *CreateTableFromSnapshotRequest) GetTableId() string { + if x != nil { + return x.TableId + } + return "" +} + +func (x *CreateTableFromSnapshotRequest) GetSourceSnapshot() string { + if x != nil { + return x.SourceSnapshot + } + return "" +} + +// Request message for +// [google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange][google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange] +type DropRowRangeRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Required. The unique name of the table on which to drop a range of rows. + // Values are of the form + // `projects/{project}/instances/{instance}/tables/{table}`. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Delete all rows or by prefix. 
+ // + // Types that are assignable to Target: + // *DropRowRangeRequest_RowKeyPrefix + // *DropRowRangeRequest_DeleteAllDataFromTable + Target isDropRowRangeRequest_Target `protobuf_oneof:"target"` +} + +func (x *DropRowRangeRequest) Reset() { + *x = DropRowRangeRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *DropRowRangeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DropRowRangeRequest) ProtoMessage() {} + +func (x *DropRowRangeRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DropRowRangeRequest.ProtoReflect.Descriptor instead. +func (*DropRowRangeRequest) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{5} +} + +func (x *DropRowRangeRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (m *DropRowRangeRequest) GetTarget() isDropRowRangeRequest_Target { + if m != nil { + return m.Target + } + return nil +} + +func (x *DropRowRangeRequest) GetRowKeyPrefix() []byte { + if x, ok := x.GetTarget().(*DropRowRangeRequest_RowKeyPrefix); ok { + return x.RowKeyPrefix + } + return nil } func (x *DropRowRangeRequest) GetDeleteAllDataFromTable() bool { @@ -329,8 +601,8 @@ type ListTablesRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the instance for which tables should be - // listed. Values are of the form `projects/{project}/instances/{instance}`. + // Required. The unique name of the instance for which tables should be listed. + // Values are of the form `projects/{project}/instances/{instance}`. Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` // The view to be applied to the returned tables' fields. // Only NAME_ONLY view (default) and REPLICATION_VIEW are supported. @@ -352,7 +624,7 @@ type ListTablesRequest struct { func (x *ListTablesRequest) Reset() { *x = ListTablesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[3] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -365,7 +637,7 @@ func (x *ListTablesRequest) String() string { func (*ListTablesRequest) ProtoMessage() {} func (x *ListTablesRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[3] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -378,7 +650,7 @@ func (x *ListTablesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListTablesRequest.ProtoReflect.Descriptor instead. 
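// A minimal sketch of a CreateTableRequest pre-split at the keys from the
// worked example in the initial_splits comment above, with the same
// adminpb/client/ctx assumptions; the Split wrapper is assumed to carry a
// single `Key []byte` field.
//
//	tbl, err := client.CreateTable(ctx, &adminpb.CreateTableRequest{
//		Parent:  "projects/my-project/instances/my-instance",
//		TableId: "my-table",
//		Table:   &adminpb.Table{},
//		// Four split keys yield five tablets, as in the comment's example.
//		InitialSplits: []*adminpb.CreateTableRequest_Split{
//			{Key: []byte("apple")},
//			{Key: []byte("customer_1")},
//			{Key: []byte("customer_2")},
//			{Key: []byte("other")},
//		},
//	})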
func (*ListTablesRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{3} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{6} } func (x *ListTablesRequest) GetParent() string { @@ -427,7 +699,7 @@ type ListTablesResponse struct { func (x *ListTablesResponse) Reset() { *x = ListTablesResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[4] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -440,7 +712,7 @@ func (x *ListTablesResponse) String() string { func (*ListTablesResponse) ProtoMessage() {} func (x *ListTablesResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[4] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -453,7 +725,7 @@ func (x *ListTablesResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListTablesResponse.ProtoReflect.Descriptor instead. func (*ListTablesResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{4} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{7} } func (x *ListTablesResponse) GetTables() []*Table { @@ -489,7 +761,7 @@ type GetTableRequest struct { func (x *GetTableRequest) Reset() { *x = GetTableRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[5] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -502,7 +774,7 @@ func (x *GetTableRequest) String() string { func (*GetTableRequest) ProtoMessage() {} func (x *GetTableRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[5] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -515,7 +787,7 @@ func (x *GetTableRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetTableRequest.ProtoReflect.Descriptor instead. 
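// The DropRowRangeRequest defined above takes exactly one of two targets.
// A minimal sketch, same adminpb/client/ctx assumptions as before:
//
//	// Drop every row whose key starts with a given prefix...
//	_, err := client.DropRowRange(ctx, &adminpb.DropRowRangeRequest{
//		Name:   "projects/p/instances/i/tables/t",
//		Target: &adminpb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("customer_")},
//	})
//	// ...or delete all rows in the table via the other oneof variant.
//	_, err = client.DropRowRange(ctx, &adminpb.DropRowRangeRequest{
//		Name:   "projects/p/instances/i/tables/t",
//		Target: &adminpb.DropRowRangeRequest_DeleteAllDataFromTable{DeleteAllDataFromTable: true},
//	})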
func (*GetTableRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{5} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{8} } func (x *GetTableRequest) GetName() string { @@ -548,7 +820,7 @@ type DeleteTableRequest struct { func (x *DeleteTableRequest) Reset() { *x = DeleteTableRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[6] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -561,7 +833,7 @@ func (x *DeleteTableRequest) String() string { func (*DeleteTableRequest) ProtoMessage() {} func (x *DeleteTableRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[6] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -574,7 +846,7 @@ func (x *DeleteTableRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteTableRequest.ProtoReflect.Descriptor instead. func (*DeleteTableRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{6} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{9} } func (x *DeleteTableRequest) GetName() string { @@ -595,17 +867,17 @@ type ModifyColumnFamiliesRequest struct { // Values are of the form // `projects/{project}/instances/{instance}/tables/{table}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // Required. Modifications to be atomically applied to the specified table's - // families. Entries are applied in order, meaning that earlier modifications - // can be masked by later ones (in the case of repeated updates to the same - // family, for example). + // Required. Modifications to be atomically applied to the specified table's families. + // Entries are applied in order, meaning that earlier modifications can be + // masked by later ones (in the case of repeated updates to the same family, + // for example). Modifications []*ModifyColumnFamiliesRequest_Modification `protobuf:"bytes,2,rep,name=modifications,proto3" json:"modifications,omitempty"` } func (x *ModifyColumnFamiliesRequest) Reset() { *x = ModifyColumnFamiliesRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[7] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -618,7 +890,7 @@ func (x *ModifyColumnFamiliesRequest) String() string { func (*ModifyColumnFamiliesRequest) ProtoMessage() {} func (x *ModifyColumnFamiliesRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[7] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -631,7 +903,7 @@ func (x *ModifyColumnFamiliesRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ModifyColumnFamiliesRequest.ProtoReflect.Descriptor instead. 
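// Because modifications apply in order, a single atomic ModifyColumnFamilies
// call can both create and drop families. A sketch with the usual
// adminpb/client/ctx assumptions; the Modification wrapper types and the
// ColumnFamily message are assumed from their definitions elsewhere in this
// package.
//
//	tbl, err := client.ModifyColumnFamilies(ctx, &adminpb.ModifyColumnFamiliesRequest{
//		Name: "projects/p/instances/i/tables/t",
//		Modifications: []*adminpb.ModifyColumnFamiliesRequest_Modification{
//			{Id: "cf-new", Mod: &adminpb.ModifyColumnFamiliesRequest_Modification_Create{Create: &adminpb.ColumnFamily{}}},
//			{Id: "cf-old", Mod: &adminpb.ModifyColumnFamiliesRequest_Modification_Drop{Drop: true}},
//		},
//	})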
func (*ModifyColumnFamiliesRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{7} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{10} } func (x *ModifyColumnFamiliesRequest) GetName() string { @@ -655,8 +927,8 @@ type GenerateConsistencyTokenRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the Table for which to create a consistency - // token. Values are of the form + // Required. The unique name of the Table for which to create a consistency token. + // Values are of the form // `projects/{project}/instances/{instance}/tables/{table}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } @@ -664,7 +936,7 @@ type GenerateConsistencyTokenRequest struct { func (x *GenerateConsistencyTokenRequest) Reset() { *x = GenerateConsistencyTokenRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[8] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -677,7 +949,7 @@ func (x *GenerateConsistencyTokenRequest) String() string { func (*GenerateConsistencyTokenRequest) ProtoMessage() {} func (x *GenerateConsistencyTokenRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[8] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -690,7 +962,7 @@ func (x *GenerateConsistencyTokenRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GenerateConsistencyTokenRequest.ProtoReflect.Descriptor instead. func (*GenerateConsistencyTokenRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{8} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{11} } func (x *GenerateConsistencyTokenRequest) GetName() string { @@ -714,7 +986,7 @@ type GenerateConsistencyTokenResponse struct { func (x *GenerateConsistencyTokenResponse) Reset() { *x = GenerateConsistencyTokenResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -727,7 +999,7 @@ func (x *GenerateConsistencyTokenResponse) String() string { func (*GenerateConsistencyTokenResponse) ProtoMessage() {} func (x *GenerateConsistencyTokenResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -740,7 +1012,7 @@ func (x *GenerateConsistencyTokenResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GenerateConsistencyTokenResponse.ProtoReflect.Descriptor instead. 
func (*GenerateConsistencyTokenResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{9} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{12} } func (x *GenerateConsistencyTokenResponse) GetConsistencyToken() string { @@ -757,8 +1029,8 @@ type CheckConsistencyRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the Table for which to check replication - // consistency. Values are of the form + // Required. The unique name of the Table for which to check replication consistency. + // Values are of the form // `projects/{project}/instances/{instance}/tables/{table}`. Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` // Required. The token created using GenerateConsistencyToken for the Table. @@ -768,7 +1040,7 @@ type CheckConsistencyRequest struct { func (x *CheckConsistencyRequest) Reset() { *x = CheckConsistencyRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -781,7 +1053,7 @@ func (x *CheckConsistencyRequest) String() string { func (*CheckConsistencyRequest) ProtoMessage() {} func (x *CheckConsistencyRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -794,7 +1066,7 @@ func (x *CheckConsistencyRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CheckConsistencyRequest.ProtoReflect.Descriptor instead. func (*CheckConsistencyRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{10} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{13} } func (x *CheckConsistencyRequest) GetName() string { @@ -826,7 +1098,7 @@ type CheckConsistencyResponse struct { func (x *CheckConsistencyResponse) Reset() { *x = CheckConsistencyResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -839,7 +1111,7 @@ func (x *CheckConsistencyResponse) String() string { func (*CheckConsistencyResponse) ProtoMessage() {} func (x *CheckConsistencyResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -852,7 +1124,7 @@ func (x *CheckConsistencyResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CheckConsistencyResponse.ProtoReflect.Descriptor instead. 
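// The two message pairs above are used together: generate a token after
// finishing writes, then poll CheckConsistency until replication catches up.
// A sketch (adminpb/client/ctx as before; `time` is the standard library
// package):
//
//	gen, err := client.GenerateConsistencyToken(ctx, &adminpb.GenerateConsistencyTokenRequest{
//		Name: "projects/p/instances/i/tables/t",
//	})
//	if err != nil { /* handle */ }
//	for {
//		resp, err := client.CheckConsistency(ctx, &adminpb.CheckConsistencyRequest{
//			Name:             "projects/p/instances/i/tables/t",
//			ConsistencyToken: gen.GetConsistencyToken(),
//		})
//		if err != nil { /* handle */ }
//		if resp.GetConsistent() {
//			break
//		}
//		time.Sleep(time.Second) // naive fixed delay; real callers would back off and cap retries
//	}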
func (*CheckConsistencyResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{11} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{14} } func (x *CheckConsistencyResponse) GetConsistent() bool { @@ -882,9 +1154,9 @@ type SnapshotTableRequest struct { // Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}`. Cluster string `protobuf:"bytes,2,opt,name=cluster,proto3" json:"cluster,omitempty"` - // Required. The ID by which the new snapshot should be referred to within the - // parent cluster, e.g., `mysnapshot` of the form: - // `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` rather than + // Required. The ID by which the new snapshot should be referred to within the parent + // cluster, e.g., `mysnapshot` of the form: `[_a-zA-Z0-9][-_.a-zA-Z0-9]*` + // rather than // `projects/{project}/instances/{instance}/clusters/{cluster}/snapshots/mysnapshot`. SnapshotId string `protobuf:"bytes,3,opt,name=snapshot_id,json=snapshotId,proto3" json:"snapshot_id,omitempty"` // The amount of time that the new snapshot can stay active after it is @@ -899,7 +1171,7 @@ type SnapshotTableRequest struct { func (x *SnapshotTableRequest) Reset() { *x = SnapshotTableRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -912,7 +1184,7 @@ func (x *SnapshotTableRequest) String() string { func (*SnapshotTableRequest) ProtoMessage() {} func (x *SnapshotTableRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -925,7 +1197,7 @@ func (x *SnapshotTableRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use SnapshotTableRequest.ProtoReflect.Descriptor instead. func (*SnapshotTableRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{12} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{15} } func (x *SnapshotTableRequest) GetName() string { @@ -984,7 +1256,7 @@ type GetSnapshotRequest struct { func (x *GetSnapshotRequest) Reset() { *x = GetSnapshotRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -997,7 +1269,7 @@ func (x *GetSnapshotRequest) String() string { func (*GetSnapshotRequest) ProtoMessage() {} func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1010,7 +1282,7 @@ func (x *GetSnapshotRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetSnapshotRequest.ProtoReflect.Descriptor instead. 
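// A minimal SnapshotTableRequest sketch (adminpb/client/ctx as before).
// durationpb is assumed to be google.golang.org/protobuf/types/known/durationpb,
// matching the Ttl field's well-known Duration type; names are hypothetical.
//
//	op, err := client.SnapshotTable(ctx, &adminpb.SnapshotTableRequest{
//		Name:       "projects/p/instances/i/tables/t",
//		Cluster:    "projects/p/instances/i/clusters/c",
//		SnapshotId: "mysnapshot", // short ID, not a full resource name
//		Ttl:        durationpb.New(24 * time.Hour),
//	})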
func (*GetSnapshotRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{13} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{16} } func (x *GetSnapshotRequest) GetName() string { @@ -1032,8 +1304,8 @@ type ListSnapshotsRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The unique name of the cluster for which snapshots should be - // listed. Values are of the form + // Required. The unique name of the cluster for which snapshots should be listed. + // Values are of the form // `projects/{project}/instances/{instance}/clusters/{cluster}`. // Use `{cluster} = '-'` to list snapshots for all clusters in an instance, // e.g., `projects/{project}/instances/{instance}/clusters/-`. @@ -1048,7 +1320,7 @@ type ListSnapshotsRequest struct { func (x *ListSnapshotsRequest) Reset() { *x = ListSnapshotsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1061,7 +1333,7 @@ func (x *ListSnapshotsRequest) String() string { func (*ListSnapshotsRequest) ProtoMessage() {} func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1074,7 +1346,7 @@ func (x *ListSnapshotsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSnapshotsRequest.ProtoReflect.Descriptor instead. func (*ListSnapshotsRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{14} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{17} } func (x *ListSnapshotsRequest) GetParent() string { @@ -1121,7 +1393,7 @@ type ListSnapshotsResponse struct { func (x *ListSnapshotsResponse) Reset() { *x = ListSnapshotsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1134,7 +1406,7 @@ func (x *ListSnapshotsResponse) String() string { func (*ListSnapshotsResponse) ProtoMessage() {} func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1147,7 +1419,7 @@ func (x *ListSnapshotsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListSnapshotsResponse.ProtoReflect.Descriptor instead. 
func (*ListSnapshotsResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{15} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{18} } func (x *ListSnapshotsResponse) GetSnapshots() []*Snapshot { @@ -1185,7 +1457,7 @@ type DeleteSnapshotRequest struct { func (x *DeleteSnapshotRequest) Reset() { *x = DeleteSnapshotRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1198,7 +1470,7 @@ func (x *DeleteSnapshotRequest) String() string { func (*DeleteSnapshotRequest) ProtoMessage() {} func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1211,7 +1483,7 @@ func (x *DeleteSnapshotRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteSnapshotRequest.ProtoReflect.Descriptor instead. func (*DeleteSnapshotRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{16} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{19} } func (x *DeleteSnapshotRequest) GetName() string { @@ -1243,7 +1515,7 @@ type SnapshotTableMetadata struct { func (x *SnapshotTableMetadata) Reset() { *x = SnapshotTableMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1256,7 +1528,7 @@ func (x *SnapshotTableMetadata) String() string { func (*SnapshotTableMetadata) ProtoMessage() {} func (x *SnapshotTableMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1269,7 +1541,7 @@ func (x *SnapshotTableMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use SnapshotTableMetadata.ProtoReflect.Descriptor instead. 
func (*SnapshotTableMetadata) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{17} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{20} } func (x *SnapshotTableMetadata) GetOriginalRequest() *SnapshotTableRequest { @@ -1316,7 +1588,7 @@ type CreateTableFromSnapshotMetadata struct { func (x *CreateTableFromSnapshotMetadata) Reset() { *x = CreateTableFromSnapshotMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1329,7 +1601,7 @@ func (x *CreateTableFromSnapshotMetadata) String() string { func (*CreateTableFromSnapshotMetadata) ProtoMessage() {} func (x *CreateTableFromSnapshotMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1342,7 +1614,7 @@ func (x *CreateTableFromSnapshotMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateTableFromSnapshotMetadata.ProtoReflect.Descriptor instead. func (*CreateTableFromSnapshotMetadata) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{18} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{21} } func (x *CreateTableFromSnapshotMetadata) GetOriginalRequest() *CreateTableFromSnapshotRequest { @@ -1366,8 +1638,7 @@ func (x *CreateTableFromSnapshotMetadata) GetFinishTime() *timestamppb.Timestamp return nil } -// The request for -// [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. +// The request for [CreateBackup][google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup]. type CreateBackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1391,7 +1662,7 @@ type CreateBackupRequest struct { func (x *CreateBackupRequest) Reset() { *x = CreateBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1404,7 +1675,7 @@ func (x *CreateBackupRequest) String() string { func (*CreateBackupRequest) ProtoMessage() {} func (x *CreateBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1417,7 +1688,7 @@ func (x *CreateBackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateBackupRequest.ProtoReflect.Descriptor instead. 
func (*CreateBackupRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{19} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{22} } func (x *CreateBackupRequest) GetParent() string { @@ -1461,7 +1732,7 @@ type CreateBackupMetadata struct { func (x *CreateBackupMetadata) Reset() { *x = CreateBackupMetadata{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1474,7 +1745,7 @@ func (x *CreateBackupMetadata) String() string { func (*CreateBackupMetadata) ProtoMessage() {} func (x *CreateBackupMetadata) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1487,7 +1758,7 @@ func (x *CreateBackupMetadata) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateBackupMetadata.ProtoReflect.Descriptor instead. func (*CreateBackupMetadata) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{20} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{23} } func (x *CreateBackupMetadata) GetName() string { @@ -1518,36 +1789,42 @@ func (x *CreateBackupMetadata) GetEndTime() *timestamppb.Timestamp { return nil } -// The request for -// [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. -type GetBackupRequest struct { +// The request for [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. +type UpdateBackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. Name of the backup. - // Values are of the form - // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Required. The backup to update. `backup.name`, and the fields to be updated + // as specified by `update_mask` are required. Other fields are ignored. + // Update is only supported for the following fields: + // * `backup.expire_time`. + Backup *Backup `protobuf:"bytes,1,opt,name=backup,proto3" json:"backup,omitempty"` + // Required. A mask specifying which fields (e.g. `expire_time`) in the + // Backup resource should be updated. This mask is relative to the Backup + // resource, not to the request message. The field mask must always be + // specified; this prevents any future fields from being erased accidentally + // by clients that do not know about them. 
+ UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` } -func (x *GetBackupRequest) Reset() { - *x = GetBackupRequest{} +func (x *UpdateBackupRequest) Reset() { + *x = UpdateBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetBackupRequest) String() string { +func (x *UpdateBackupRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetBackupRequest) ProtoMessage() {} +func (*UpdateBackupRequest) ProtoMessage() {} -func (x *GetBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21] +func (x *UpdateBackupRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1558,55 +1835,54 @@ func (x *GetBackupRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetBackupRequest.ProtoReflect.Descriptor instead. -func (*GetBackupRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{21} +// Deprecated: Use UpdateBackupRequest.ProtoReflect.Descriptor instead. +func (*UpdateBackupRequest) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{24} } -func (x *GetBackupRequest) GetName() string { +func (x *UpdateBackupRequest) GetBackup() *Backup { if x != nil { - return x.Name + return x.Backup } - return "" + return nil } -// The request for -// [UpdateBackup][google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup]. -type UpdateBackupRequest struct { +func (x *UpdateBackupRequest) GetUpdateMask() *fieldmaskpb.FieldMask { + if x != nil { + return x.UpdateMask + } + return nil +} + +// The request for [GetBackup][google.bigtable.admin.v2.BigtableTableAdmin.GetBackup]. +type GetBackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The backup to update. `backup.name`, and the fields to be updated - // as specified by `update_mask` are required. Other fields are ignored. - // Update is only supported for the following fields: - // * `backup.expire_time`. - Backup *Backup `protobuf:"bytes,1,opt,name=backup,proto3" json:"backup,omitempty"` - // Required. A mask specifying which fields (e.g. `expire_time`) in the - // Backup resource should be updated. This mask is relative to the Backup - // resource, not to the request message. The field mask must always be - // specified; this prevents any future fields from being erased accidentally - // by clients that do not know about them. - UpdateMask *fieldmaskpb.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask,proto3" json:"update_mask,omitempty"` + // Required. Name of the backup. + // Values are of the form + // `projects/{project}/instances/{instance}/clusters/{cluster}/backups/{backup}`. 
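// Per the comment above, `expire_time` is the only updatable field, and the
// mask must name it explicitly. A sketch (adminpb/client/ctx as before;
// timestamppb and fieldmaskpb are the well-known-type packages this file
// already references, and the Backup fields are assumed from its definition
// elsewhere in the package):
//
//	backup, err := client.UpdateBackup(ctx, &adminpb.UpdateBackupRequest{
//		Backup: &adminpb.Backup{
//			Name:       "projects/p/instances/i/clusters/c/backups/b",
//			ExpireTime: timestamppb.New(time.Now().Add(72 * time.Hour)),
//		},
//		UpdateMask: &fieldmaskpb.FieldMask{Paths: []string{"expire_time"}},
//	})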
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` } -func (x *UpdateBackupRequest) Reset() { - *x = UpdateBackupRequest{} +func (x *GetBackupRequest) Reset() { + *x = GetBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *UpdateBackupRequest) String() string { +func (x *GetBackupRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateBackupRequest) ProtoMessage() {} +func (*GetBackupRequest) ProtoMessage() {} -func (x *UpdateBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22] +func (x *GetBackupRequest) ProtoReflect() protoreflect.Message { + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1617,27 +1893,19 @@ func (x *UpdateBackupRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateBackupRequest.ProtoReflect.Descriptor instead. -func (*UpdateBackupRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{22} -} - -func (x *UpdateBackupRequest) GetBackup() *Backup { - if x != nil { - return x.Backup - } - return nil +// Deprecated: Use GetBackupRequest.ProtoReflect.Descriptor instead. +func (*GetBackupRequest) Descriptor() ([]byte, []int) { + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{25} } -func (x *UpdateBackupRequest) GetUpdateMask() *fieldmaskpb.FieldMask { +func (x *GetBackupRequest) GetName() string { if x != nil { - return x.UpdateMask + return x.Name } - return nil + return "" } -// The request for -// [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. +// The request for [DeleteBackup][google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup]. type DeleteBackupRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -1652,7 +1920,7 @@ type DeleteBackupRequest struct { func (x *DeleteBackupRequest) Reset() { *x = DeleteBackupRequest{} if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1665,7 +1933,7 @@ func (x *DeleteBackupRequest) String() string { func (*DeleteBackupRequest) ProtoMessage() {} func (x *DeleteBackupRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23] + mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1678,7 +1946,7 @@ func (x *DeleteBackupRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeleteBackupRequest.ProtoReflect.Descriptor instead. 
func (*DeleteBackupRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{23} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{26} } func (x *DeleteBackupRequest) GetName() string { @@ -1688,14 +1956,13 @@ func (x *DeleteBackupRequest) GetName() string { return "" } -// The request for -// [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. +// The request for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. type ListBackupsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Required. The cluster to list backups from. Values are of the + // Required. The cluster to list backups from. Values are of the // form `projects/{project}/instances/{instance}/clusters/{cluster}`. // Use `{cluster} = '-'` to list backups for all clusters in an instance, // e.g., `projects/{project}/instances/{instance}/clusters/-`. @@ -1704,7 +1971,7 @@ type ListBackupsRequest struct { // The expression must specify the field name, a comparison operator, // and the value that you want to use for filtering. The value must be a // string, a number, or a boolean. The comparison operator must be - // <, >, <=, >=, !=, =, or :. Colon ‘:’ represents a HAS operator which is + // <, >, <=, >=, !=, =, or :. Colon ':' represents a HAS operator which is // roughly synonymous with equality. Filter rules are case insensitive. // // The fields eligible for filtering are: @@ -1734,297 +2001,38 @@ type ListBackupsRequest struct { // * `size_bytes > 10000000000` --> The backup's size is greater than 10GB Filter string `protobuf:"bytes,2,opt,name=filter,proto3" json:"filter,omitempty"` // An expression for specifying the sort order of the results of the request. - // The string value should specify one or more fields in - // [Backup][google.bigtable.admin.v2.Backup]. The full syntax is described at - // https://aip.dev/132#ordering. - // - // Fields supported are: - // * name - // * source_table - // * expire_time - // * start_time - // * end_time - // * size_bytes - // * state - // - // For example, "start_time". The default sorting order is ascending. - // To specify descending order for the field, a suffix " desc" should - // be appended to the field name. For example, "start_time desc". - // Redundant space characters in the syntax are insigificant. - // - // If order_by is empty, results will be sorted by `start_time` in descending - // order starting from the most recently created backup. - OrderBy string `protobuf:"bytes,3,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` - // Number of backups to be returned in the response. If 0 or - // less, defaults to the server's maximum allowed page size. - PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - // If non-empty, `page_token` should contain a - // [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] - // from a previous - // [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] to the - // same `parent` and with the same `filter`. 
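// A paging sketch for ListBackups using the filter and ordering syntax
// documented above (adminpb/client/ctx as before; the filter value and
// resource names are hypothetical):
//
//	req := &adminpb.ListBackupsRequest{
//		Parent:  "projects/p/instances/i/clusters/-", // '-' spans all clusters
//		Filter:  "state:READY",
//		OrderBy: "start_time desc",
//	}
//	for {
//		resp, err := client.ListBackups(ctx, req)
//		if err != nil { /* handle */ }
//		// consume resp.GetBackups()
//		if resp.GetNextPageToken() == "" {
//			break
//		}
//		req.PageToken = resp.GetNextPageToken()
//	}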
- PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` -} - -func (x *ListBackupsRequest) Reset() { - *x = ListBackupsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListBackupsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListBackupsRequest) ProtoMessage() {} - -func (x *ListBackupsRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListBackupsRequest.ProtoReflect.Descriptor instead. -func (*ListBackupsRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{24} -} - -func (x *ListBackupsRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *ListBackupsRequest) GetFilter() string { - if x != nil { - return x.Filter - } - return "" -} - -func (x *ListBackupsRequest) GetOrderBy() string { - if x != nil { - return x.OrderBy - } - return "" -} - -func (x *ListBackupsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListBackupsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -// The response for -// [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. -type ListBackupsResponse struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // The list of matching backups. - Backups []*Backup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` - // `next_page_token` can be sent in a subsequent - // [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] call - // to fetch more of the matching backups. - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` -} - -func (x *ListBackupsResponse) Reset() { - *x = ListBackupsResponse{} - if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *ListBackupsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListBackupsResponse) ProtoMessage() {} - -func (x *ListBackupsResponse) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListBackupsResponse.ProtoReflect.Descriptor instead. 
-func (*ListBackupsResponse) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{25} -} - -func (x *ListBackupsResponse) GetBackups() []*Backup { - if x != nil { - return x.Backups - } - return nil -} - -func (x *ListBackupsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -// The request for -// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. -type RestoreTableRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Required. The name of the instance in which to create the restored - // table. This instance must be the parent of the source backup. Values are - // of the form `projects//instances/`. - Parent string `protobuf:"bytes,1,opt,name=parent,proto3" json:"parent,omitempty"` - // Required. The id of the table to create and restore to. This - // table must not already exist. The `table_id` appended to - // `parent` forms the full table name of the form - // `projects//instances//tables/`. - TableId string `protobuf:"bytes,2,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"` - // Required. The source from which to restore. + // The string value should specify one or more fields in [Backup][google.bigtable.admin.v2.Backup]. The full + // syntax is described at https://aip.dev/132#ordering. // - // Types that are assignable to Source: - // *RestoreTableRequest_Backup - Source isRestoreTableRequest_Source `protobuf_oneof:"source"` -} - -func (x *RestoreTableRequest) Reset() { - *x = RestoreTableRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RestoreTableRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RestoreTableRequest) ProtoMessage() {} - -func (x *RestoreTableRequest) ProtoReflect() protoreflect.Message { - mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RestoreTableRequest.ProtoReflect.Descriptor instead. -func (*RestoreTableRequest) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{26} -} - -func (x *RestoreTableRequest) GetParent() string { - if x != nil { - return x.Parent - } - return "" -} - -func (x *RestoreTableRequest) GetTableId() string { - if x != nil { - return x.TableId - } - return "" -} - -func (m *RestoreTableRequest) GetSource() isRestoreTableRequest_Source { - if m != nil { - return m.Source - } - return nil -} - -func (x *RestoreTableRequest) GetBackup() string { - if x, ok := x.GetSource().(*RestoreTableRequest_Backup); ok { - return x.Backup - } - return "" -} - -type isRestoreTableRequest_Source interface { - isRestoreTableRequest_Source() -} - -type RestoreTableRequest_Backup struct { - // Name of the backup from which to restore. Values are of the form - // `projects//instances//clusters//backups/`. 
- Backup string `protobuf:"bytes,3,opt,name=backup,proto3,oneof"` -} - -func (*RestoreTableRequest_Backup) isRestoreTableRequest_Source() {} - -// Metadata type for the long-running operation returned by -// [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable]. -type RestoreTableMetadata struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // Name of the table being created and restored to. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The type of the restore source. - SourceType RestoreSourceType `protobuf:"varint,2,opt,name=source_type,json=sourceType,proto3,enum=google.bigtable.admin.v2.RestoreSourceType" json:"source_type,omitempty"` - // Information about the source used to restore the table, as specified by - // `source` in - // [RestoreTableRequest][google.bigtable.admin.v2.RestoreTableRequest]. - // - // Types that are assignable to SourceInfo: - // *RestoreTableMetadata_BackupInfo - SourceInfo isRestoreTableMetadata_SourceInfo `protobuf_oneof:"source_info"` - // If exists, the name of the long-running operation that will be used to - // track the post-restore optimization process to optimize the performance of - // the restored table. The metadata type of the long-running operation is - // [OptimizeRestoreTableMetadata][]. The response type is - // [Empty][google.protobuf.Empty]. This long-running operation may be - // automatically created by the system if applicable after the - // RestoreTable long-running operation completes successfully. This operation - // may not be created if the table is already optimized or the restore was - // not successful. - OptimizeTableOperationName string `protobuf:"bytes,4,opt,name=optimize_table_operation_name,json=optimizeTableOperationName,proto3" json:"optimize_table_operation_name,omitempty"` - // The progress of the - // [RestoreTable][google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable] - // operation. - Progress *OperationProgress `protobuf:"bytes,5,opt,name=progress,proto3" json:"progress,omitempty"` + // Fields supported are: + // * name + // * source_table + // * expire_time + // * start_time + // * end_time + // * size_bytes + // * state + // + // For example, "start_time". The default sorting order is ascending. + // To specify descending order for the field, a suffix " desc" should + // be appended to the field name. For example, "start_time desc". + // Redundant space characters in the syntax are insigificant. + // + // If order_by is empty, results will be sorted by `start_time` in descending + // order starting from the most recently created backup. + OrderBy string `protobuf:"bytes,3,opt,name=order_by,json=orderBy,proto3" json:"order_by,omitempty"` + // Number of backups to be returned in the response. If 0 or + // less, defaults to the server's maximum allowed page size. + PageSize int32 `protobuf:"varint,4,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + // If non-empty, `page_token` should contain a + // [next_page_token][google.bigtable.admin.v2.ListBackupsResponse.next_page_token] from a + // previous [ListBackupsResponse][google.bigtable.admin.v2.ListBackupsResponse] to the same `parent` and with the same + // `filter`. 
+ PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` } -func (x *RestoreTableMetadata) Reset() { - *x = RestoreTableMetadata{} +func (x *ListBackupsRequest) Reset() { + *x = ListBackupsRequest{} if protoimpl.UnsafeEnabled { mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2032,13 +2040,13 @@ func (x *RestoreTableMetadata) Reset() { } } -func (x *RestoreTableMetadata) String() string { +func (x *ListBackupsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RestoreTableMetadata) ProtoMessage() {} +func (*ListBackupsRequest) ProtoMessage() {} -func (x *RestoreTableMetadata) ProtoReflect() protoreflect.Message { +func (x *ListBackupsRequest) ProtoReflect() protoreflect.Message { mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2050,80 +2058,62 @@ func (x *RestoreTableMetadata) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RestoreTableMetadata.ProtoReflect.Descriptor instead. -func (*RestoreTableMetadata) Descriptor() ([]byte, []int) { +// Deprecated: Use ListBackupsRequest.ProtoReflect.Descriptor instead. +func (*ListBackupsRequest) Descriptor() ([]byte, []int) { return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{27} } -func (x *RestoreTableMetadata) GetName() string { +func (x *ListBackupsRequest) GetParent() string { if x != nil { - return x.Name + return x.Parent } return "" } -func (x *RestoreTableMetadata) GetSourceType() RestoreSourceType { +func (x *ListBackupsRequest) GetFilter() string { if x != nil { - return x.SourceType - } - return RestoreSourceType_RESTORE_SOURCE_TYPE_UNSPECIFIED -} - -func (m *RestoreTableMetadata) GetSourceInfo() isRestoreTableMetadata_SourceInfo { - if m != nil { - return m.SourceInfo - } - return nil -} - -func (x *RestoreTableMetadata) GetBackupInfo() *BackupInfo { - if x, ok := x.GetSourceInfo().(*RestoreTableMetadata_BackupInfo); ok { - return x.BackupInfo + return x.Filter } - return nil + return "" } -func (x *RestoreTableMetadata) GetOptimizeTableOperationName() string { +func (x *ListBackupsRequest) GetOrderBy() string { if x != nil { - return x.OptimizeTableOperationName + return x.OrderBy } return "" } -func (x *RestoreTableMetadata) GetProgress() *OperationProgress { +func (x *ListBackupsRequest) GetPageSize() int32 { if x != nil { - return x.Progress + return x.PageSize } - return nil -} - -type isRestoreTableMetadata_SourceInfo interface { - isRestoreTableMetadata_SourceInfo() + return 0 } -type RestoreTableMetadata_BackupInfo struct { - BackupInfo *BackupInfo `protobuf:"bytes,3,opt,name=backup_info,json=backupInfo,proto3,oneof"` +func (x *ListBackupsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" } -func (*RestoreTableMetadata_BackupInfo) isRestoreTableMetadata_SourceInfo() {} - -// Metadata type for the long-running operation used to track the progress -// of optimizations performed on a newly restored table. This long-running -// operation is automatically created by the system after the successful -// completion of a table restore, and cannot be cancelled. -type OptimizeRestoredTableMetadata struct { +// The response for [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups]. 
+type ListBackupsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - // Name of the restored table being optimized. - Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` - // The progress of the post-restore optimizations. - Progress *OperationProgress `protobuf:"bytes,2,opt,name=progress,proto3" json:"progress,omitempty"` + // The list of matching backups. + Backups []*Backup `protobuf:"bytes,1,rep,name=backups,proto3" json:"backups,omitempty"` + // `next_page_token` can be sent in a subsequent + // [ListBackups][google.bigtable.admin.v2.BigtableTableAdmin.ListBackups] call to fetch more + // of the matching backups. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` } -func (x *OptimizeRestoredTableMetadata) Reset() { - *x = OptimizeRestoredTableMetadata{} +func (x *ListBackupsResponse) Reset() { + *x = ListBackupsResponse{} if protoimpl.UnsafeEnabled { mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2131,13 +2121,13 @@ func (x *OptimizeRestoredTableMetadata) Reset() { } } -func (x *OptimizeRestoredTableMetadata) String() string { +func (x *ListBackupsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*OptimizeRestoredTableMetadata) ProtoMessage() {} +func (*ListBackupsResponse) ProtoMessage() {} -func (x *OptimizeRestoredTableMetadata) ProtoReflect() protoreflect.Message { +func (x *ListBackupsResponse) ProtoReflect() protoreflect.Message { mi := &file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -2149,23 +2139,23 @@ func (x *OptimizeRestoredTableMetadata) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use OptimizeRestoredTableMetadata.ProtoReflect.Descriptor instead. -func (*OptimizeRestoredTableMetadata) Descriptor() ([]byte, []int) { +// Deprecated: Use ListBackupsResponse.ProtoReflect.Descriptor instead. +func (*ListBackupsResponse) Descriptor() ([]byte, []int) { return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{28} } -func (x *OptimizeRestoredTableMetadata) GetName() string { +func (x *ListBackupsResponse) GetBackups() []*Backup { if x != nil { - return x.Name + return x.Backups } - return "" + return nil } -func (x *OptimizeRestoredTableMetadata) GetProgress() *OperationProgress { +func (x *ListBackupsResponse) GetNextPageToken() string { if x != nil { - return x.Progress + return x.NextPageToken } - return nil + return "" } // An initial split point for a newly created table. @@ -2207,7 +2197,7 @@ func (x *CreateTableRequest_Split) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateTableRequest_Split.ProtoReflect.Descriptor instead. func (*CreateTableRequest_Split) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{0, 0} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{3, 0} } func (x *CreateTableRequest_Split) GetKey() []byte { @@ -2263,7 +2253,7 @@ func (x *ModifyColumnFamiliesRequest_Modification) ProtoReflect() protoreflect.M // Deprecated: Use ModifyColumnFamiliesRequest_Modification.ProtoReflect.Descriptor instead. 
func (*ModifyColumnFamiliesRequest_Modification) Descriptor() ([]byte, []int) { - return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{7, 0} + return file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP(), []int{10, 0} } func (x *ModifyColumnFamiliesRequest_Modification) GetId() string { @@ -2366,6 +2356,48 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xc0, 0x01, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, + 0x0a, 0x20, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x52, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x1e, 0x0a, 0x08, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x42, 0x03, 0xe0, 0x41, + 0x02, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x42, 0x23, 0xfa, 0x41, 0x20, 0x0a, + 0x1e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x48, + 0x00, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x08, 0x0a, 0x06, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x22, 0xdc, 0x02, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x4c, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, + 0x70, 0x65, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x47, + 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x41, 0x0a, 0x1d, 0x6f, 0x70, 0x74, 0x69, 0x6d, + 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x1a, + 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4f, 0x70, 0x65, + 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 
0x08, 0x70, 0x72, + 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, + 0x65, 0x73, 0x73, 0x42, 0x0d, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, 0x6e, + 0x66, 0x6f, 0x22, 0x7c, 0x0a, 0x1d, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x52, 0x65, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, + 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, + 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, + 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x72, + 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x22, 0xa8, 0x02, 0x0a, 0x12, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x40, 0x0a, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x28, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x22, 0x0a, @@ -2592,12 +2624,7 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x65, 0x12, 0x35, 0x0a, 0x08, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, - 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x4e, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, - 0x41, 0x20, 0x0a, 0x1e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x96, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, + 0x07, 0x65, 0x6e, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x22, 0x96, 0x01, 0x0a, 0x13, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3d, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, @@ -2607,7 +2634,12 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4d, 0x61, 0x73, 0x6b, 0x42, 0x03, 0xe0, 0x41, 0x02, 0x52, 0x0a, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x4d, 0x61, 0x73, - 0x6b, 0x22, 0x51, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, + 0x6b, 0x22, 0x4e, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 
0x73, 0x74, 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x04, 0x6e, 0x61, 0x6d, + 0x65, 0x22, 0x51, 0x0a, 0x13, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x3a, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x42, 0x26, 0xe0, 0x41, 0x02, 0xfa, 0x41, 0x20, 0x0a, 0x1e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, @@ -2632,351 +2664,319 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDesc = []byte{ 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x07, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x70, 0x61, 0x67, 0x65, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6e, 0x65, 0x78, 0x74, 0x50, 0x61, 0x67, - 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x6c, 0x0a, 0x13, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, - 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, - 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, - 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x49, 0x64, - 0x12, 0x18, 0x0a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x48, 0x00, 0x52, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x42, 0x08, 0x0a, 0x06, 0x73, 0x6f, - 0x75, 0x72, 0x63, 0x65, 0x22, 0xdc, 0x02, 0x0a, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, - 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, - 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, - 0x79, 0x70, 0x65, 0x52, 0x0a, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x47, 0x0a, 0x0b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, - 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x61, - 0x63, 0x6b, 0x75, 0x70, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x41, 0x0a, 0x1d, 0x6f, 0x70, 0x74, 0x69, - 0x6d, 0x69, 0x7a, 0x65, 0x5f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x6f, 0x70, 0x65, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x1a, 0x6f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4f, 0x70, - 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x08, 0x70, - 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x18, 
0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, + 0x65, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x32, 0x98, 0x25, 0x0a, 0x12, 0x42, 0x69, 0x67, 0x74, 0x61, + 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0xab, 0x01, + 0x0a, 0x0b, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x50, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, - 0x72, 0x65, 0x73, 0x73, 0x42, 0x0d, 0x0a, 0x0b, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x69, - 0x6e, 0x66, 0x6f, 0x22, 0x7c, 0x0a, 0x1d, 0x4f, 0x70, 0x74, 0x69, 0x6d, 0x69, 0x7a, 0x65, 0x52, - 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x64, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, - 0x64, 0x61, 0x74, 0x61, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x67, - 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, - 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x50, - 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, 0x73, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x67, 0x72, 0x65, 0x73, - 0x73, 0x32, 0xc8, 0x24, 0x0a, 0x12, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x12, 0xab, 0x01, 0x0a, 0x0b, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x4d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2f, 0x22, - 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, - 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, - 0x15, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, - 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x8a, 0x02, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, - 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, - 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, - 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x95, 0x01, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x42, 0x22, 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 
0x61, 0x72, 0x65, 0x6e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x4d, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x2f, 0x22, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x3a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, - 0x68, 0x6f, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x1f, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, - 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, - 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0xca, 0x41, 0x28, 0x0a, 0x05, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x12, 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, - 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x4d, 0x65, 0x74, 0x61, 0x64, - 0x61, 0x74, 0x61, 0x12, 0xa4, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x3b, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0x91, 0x01, 0x0a, 0x08, 0x47, - 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x15, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x8a, 0x02, 0x0a, 0x17, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x38, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, - 0x62, 0x6c, 0x65, 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x32, - 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, - 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, - 0x62, 
0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x8e, - 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2c, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x2a, 0x2a, 0x2f, 0x76, - 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, - 0xcf, 0x01, 0x0a, 0x14, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, - 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x12, 0x35, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, - 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, - 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x22, 0x3f, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, - 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, 0x6f, 0x6c, 0x75, 0x6d, - 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x12, 0x6e, - 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x99, 0x01, 0x0a, 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, - 0x67, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x72, - 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, + 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, + 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x22, 0x95, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x42, 0x22, 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x46, 0x72, 0x6f, 0x6d, + 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x1f, 0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x2c, 
0x74, 0x61, 0x62, 0x6c, 0x65, 0x5f, 0x69, 0x64, 0x2c, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0xca, 0x41, 0x28, + 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x1f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x54, + 0x61, 0x62, 0x6c, 0x65, 0x46, 0x72, 0x6f, 0x6d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xa4, 0x01, 0x0a, 0x0a, 0x4c, 0x69, 0x73, + 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x12, 0x2b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x32, 0x2f, + 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, + 0x91, 0x01, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x29, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, + 0x12, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, + 0x61, 0x6d, 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x0b, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, + 0x62, 0x6c, 0x65, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x42, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x3c, 0x22, 0x37, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x39, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x2c, 0x2a, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, - 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x64, 0x72, - 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0xe8, 0x01, - 0x0a, 0x18, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 
0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, - 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x39, 0x2e, 0x67, 0x6f, 0x6f, + 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xcf, 0x01, 0x0a, 0x14, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x12, 0x35, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, + 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4d, 0x6f, 0x64, 0x69, 0x66, 0x79, 0x43, + 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x44, 0x22, 0x3f, 0x2f, + 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x79, + 0x43, 0x6f, 0x6c, 0x75, 0x6d, 0x6e, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x69, 0x65, 0x73, 0x3a, 0x01, + 0x2a, 0xda, 0x41, 0x12, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x63, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x99, 0x01, 0x0a, 0x0c, 0x44, 0x72, 0x6f, 0x70, 0x52, + 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x44, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x42, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3c, 0x22, 0x37, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, + 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x3a, 0x64, 0x72, 0x6f, 0x70, 0x52, 0x6f, 0x77, 0x52, 0x61, 0x6e, 0x67, 0x65, 0x3a, + 0x01, 0x2a, 0x12, 0xe8, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, + 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, + 0x39, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, + 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x55, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x48, 0x22, 
0x43, + 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, + 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, + 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, + 0x6b, 0x65, 0x6e, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xda, 0x01, + 0x0a, 0x10, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, + 0x63, 0x79, 0x12, 0x31, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, + 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, - 0x2e, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, - 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x22, 0x55, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x48, 0x22, 0x43, 0x2f, 0x76, 0x32, 0x2f, 0x7b, - 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, - 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x43, 0x6f, - 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x3a, 0x01, - 0x2a, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xda, 0x01, 0x0a, 0x10, 0x43, 0x68, 0x65, - 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x12, 0x31, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, - 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x32, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x68, 0x65, 0x63, - 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x3b, 0x2f, 0x76, - 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x63, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, - 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x16, 0x6e, - 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x5f, - 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0xea, 0x01, 0x0a, 0x0d, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x43, 0x68, 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, + 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x5f, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x40, 0x22, 0x3b, 
0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x63, 0x68, + 0x65, 0x63, 0x6b, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x63, 0x79, 0x3a, 0x01, + 0x2a, 0xda, 0x41, 0x16, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x6f, 0x6e, 0x73, 0x69, 0x73, 0x74, + 0x65, 0x6e, 0x63, 0x79, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0xea, 0x01, 0x0a, 0x0d, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2e, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, + 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x89, 0x01, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x38, 0x22, 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, + 0x3a, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x24, 0x6e, + 0x61, 0x6d, 0x65, 0x2c, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2c, 0x73, 0x6e, 0x61, 0x70, + 0x73, 0x68, 0x6f, 0x74, 0x5f, 0x69, 0x64, 0x2c, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, + 0x69, 0x6f, 0x6e, 0xca, 0x41, 0x21, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x12, 0x15, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xa8, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, - 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x89, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x22, - 0x33, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, + 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, + 0x2e, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x3a, 0x12, 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x73, + 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0xbb, 0x01, 0x0a, 0x0d, 
0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x73, 0x12, 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, + 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, + 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x49, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x12, 0x38, 0x2f, + 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x24, 0x6e, 0x61, 0x6d, 0x65, 0x2c, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x2c, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x5f, - 0x69, 0x64, 0x2c, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0xca, 0x41, - 0x21, 0x0a, 0x08, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x15, 0x53, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, - 0x74, 0x61, 0x12, 0xa8, 0x01, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, - 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, - 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x53, 0x6e, 0x61, 0x70, - 0x73, 0x68, 0x6f, 0x74, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x12, 0x38, 0x2f, 0x76, - 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, - 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, - 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, - 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xbb, 0x01, - 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x12, - 0x2e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, - 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x53, - 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x49, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x12, 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 
0x2a, 0x2f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, - 0x74, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xa2, 0x01, 0x0a, 0x0e, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x12, 0x2f, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, - 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, - 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x3a, 0x2a, - 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x73, 0x6e, + 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, + 0x12, 0xa2, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, + 0x68, 0x6f, 0x74, 0x12, 0x2f, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x53, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x47, 0x82, 0xd3, + 0xe4, 0x93, 0x02, 0x3a, 0x2a, 0x38, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, + 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, + 0x2a, 0x2f, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xe0, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, + 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, + 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x81, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x36, 0x2f, + 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, - 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x73, 0x6e, 0x61, - 0x70, 0x73, 0x68, 0x6f, 0x74, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0xe0, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, - 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 
0x74, - 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, - 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, - 0x81, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x40, 0x22, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, - 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xca, 0x41, 0x1e, 0x0a, 0x06, 0x42, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x12, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0xda, 0x41, 0x17, 0x70, 0x61, 0x72, 0x65, - 0x6e, 0x74, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, 0x2c, 0x62, 0x61, 0x63, - 0x6b, 0x75, 0x70, 0x12, 0xa0, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x47, 0x65, 0x74, - 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, - 0x45, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0xda, - 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xc3, 0x01, 0x0a, 0x0c, 0x55, 0x70, 0x64, 0x61, 0x74, - 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, - 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x73, 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xda, 0x41, 0x17, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x69, 0x64, + 0x2c, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xca, 0x41, 0x1e, 0x0a, 0x06, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x12, 0x14, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0xa0, 0x01, 0x0a, 0x09, 0x47, 0x65, 0x74, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, - 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x62, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x47, - 0x32, 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2e, 0x6e, 0x61, - 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 
0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, - 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, - 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xda, 0x41, 0x12, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, 0x6b, 0x12, 0x9c, 0x01, 0x0a, - 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, - 0x6d, 0x70, 0x74, 0x79, 0x22, 0x45, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x2a, 0x36, 0x2f, 0x76, + 0x32, 0x2e, 0x47, 0x65, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, + 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x22, 0x45, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xb3, 0x01, 0x0a, 0x0b, - 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x12, 0x2c, 0x2e, 0x67, 0x6f, + 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0xc3, 0x01, 0x0a, 0x0c, + 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, - 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, - 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, - 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x38, - 0x12, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, - 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, - 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x7d, - 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0xda, 0x41, 0x06, 0x70, 0x61, 0x72, 0x65, 0x6e, - 0x74, 0x12, 0xbb, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, - 0x6c, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 
0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x6c, 0x6f, 0x6e, 0x67, 0x72, - 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x22, 0x5d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, - 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x3a, 0x01, 0x2a, 0xca, - 0x41, 0x1d, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x14, 0x52, 0x65, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, - 0x9c, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, - 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, - 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0x51, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x40, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, - 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, - 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, - 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, - 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0xf3, - 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, - 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, - 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, - 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, 0xa7, 0x01, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x8e, 0x01, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, - 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, - 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, - 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, - 0x79, 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, - 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, - 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, - 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, - 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, - 0x01, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x6f, - 0x6c, 0x69, 0x63, 0x79, 0x12, 0xa4, 0x02, 0x0a, 0x12, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, - 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x12, 
0x28, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, - 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, - 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, - 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0xb8, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9a, 0x01, 0x22, 0x41, 0x2f, 0x76, 0x32, 0x2f, + 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x22, 0x62, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x47, 0x32, 0x3d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x62, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x2e, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, + 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, + 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x06, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0xda, 0x41, 0x12, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x2c, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x5f, 0x6d, 0x61, 0x73, + 0x6b, 0x12, 0x9c, 0x01, 0x0a, 0x0c, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, + 0x6c, 0x65, 0x74, 0x65, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x45, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x38, 0x2a, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x70, 0x72, 0x6f, + 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, + 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0xda, 0x41, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0xb3, 0x01, 0x0a, 0x0b, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, + 0x12, 0x2c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, + 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, + 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x42, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x47, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x38, 0x12, 0x36, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, + 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, + 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, + 0x72, 0x73, 0x2f, 0x2a, 0x7d, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0xda, 0x41, 0x06, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x12, 0xbb, 0x01, 0x0a, 0x0c, 0x52, 0x65, 0x73, 0x74, 0x6f, + 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 
0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x6c, 0x6f, 0x6e, 0x67, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x2e, 0x4f, 0x70, 0x65, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x22, 0x5d, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x37, 0x22, 0x32, 0x2f, + 0x76, 0x32, 0x2f, 0x7b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x7d, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x3a, 0x72, 0x65, 0x73, 0x74, 0x6f, 0x72, + 0x65, 0x3a, 0x01, 0x2a, 0xca, 0x41, 0x1d, 0x0a, 0x05, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x14, + 0x52, 0x65, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x4d, 0x65, 0x74, 0x61, + 0x64, 0x61, 0x74, 0x61, 0x12, 0xec, 0x01, 0x0a, 0x0c, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, + 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, + 0x63, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, + 0x22, 0xa0, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x8e, 0x01, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, - 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, - 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, - 0x5a, 0x52, 0x22, 0x4d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, - 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, - 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, - 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x74, - 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, - 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2c, - 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0xde, 0x02, 0xca, 0x41, - 0x1c, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x67, - 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0xd2, 0x41, 0xbb, - 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, - 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2c, - 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, + 0x2f, 0x74, 0x61, 0x62, 0x6c, 
0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, + 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, + 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, + 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, + 0x2f, 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, + 0x63, 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x67, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, + 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x12, 0xf3, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x12, 0x22, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, + 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, 0x6c, 0x69, 0x63, + 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x22, + 0xa7, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x8e, 0x01, 0x22, 0x3b, 0x2f, 0x76, 0x32, 0x2f, 0x7b, + 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, + 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, + 0x50, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0x5a, 0x4c, 0x22, 0x47, 0x2f, 0x76, 0x32, + 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, + 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x2f, + 0x2a, 0x2f, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, + 0x6b, 0x75, 0x70, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, 0x73, 0x65, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x6f, + 0x6c, 0x69, 0x63, 0x79, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x0f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2c, 0x70, 0x6f, 0x6c, 0x69, 0x63, 0x79, 0x12, 0xa4, 0x02, 0x0a, 0x12, 0x54, 0x65, + 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x12, 0x28, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, + 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x69, 0x61, 0x6d, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x65, 0x73, 0x74, 0x49, + 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x9a, 0x01, 0x22, + 0x41, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, + 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x63, 0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x73, 0x2f, 0x2a, 0x7d, 0x3a, + 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0x5a, 0x52, 0x22, 0x4d, 0x2f, 0x76, 0x32, 0x2f, 0x7b, 0x72, 0x65, + 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x3d, 0x70, 0x72, 0x6f, 0x6a, 0x65, 0x63, 0x74, 0x73, 0x2f, + 0x2a, 0x2f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 
0x65, 0x73, 0x2f, 0x2a, 0x2f, 0x63, 0x6c, + 0x75, 0x73, 0x74, 0x65, 0x72, 0x73, 0x2f, 0x2a, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x73, + 0x2f, 0x2a, 0x7d, 0x3a, 0x74, 0x65, 0x73, 0x74, 0x49, 0x61, 0x6d, 0x50, 0x65, 0x72, 0x6d, 0x69, + 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x3a, 0x01, 0x2a, 0xda, 0x41, 0x14, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x2c, 0x70, 0x65, 0x72, 0x6d, 0x69, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x73, + 0x1a, 0xde, 0x02, 0xca, 0x41, 0x1c, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, + 0x6f, 0x6d, 0xd2, 0x41, 0xbb, 0x02, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, + 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, + 0x64, 0x6d, 0x69, 0x6e, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x61, 0x75, 0x74, 0x68, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, - 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, - 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, - 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, 0x79, 0x42, 0xdf, 0x01, 0x0a, - 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x62, 0x69, 0x67, 0x74, - 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x76, 0x32, 0x42, 0x17, 0x42, - 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, 0x65, 0x41, 0x64, 0x6d, 0x69, - 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, - 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2f, 0x76, - 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, - 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, - 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x47, 0x6f, 0x6f, 0x67, 0x6c, - 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 
0x6c, 0x65, - 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, 0x22, 0x47, 0x6f, 0x6f, 0x67, - 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, 0x42, 0x69, 0x67, 0x74, 0x61, - 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, 0x3a, 0x56, 0x32, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2c, 0x68, + 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, + 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2e, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, + 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, + 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, + 0x70, 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2c, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, + 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x61, 0x70, 0x69, 0x73, 0x2e, + 0x63, 0x6f, 0x6d, 0x2f, 0x61, 0x75, 0x74, 0x68, 0x2f, 0x63, 0x6c, 0x6f, 0x75, 0x64, 0x2d, 0x70, + 0x6c, 0x61, 0x74, 0x66, 0x6f, 0x72, 0x6d, 0x2e, 0x72, 0x65, 0x61, 0x64, 0x2d, 0x6f, 0x6e, 0x6c, + 0x79, 0x42, 0xdf, 0x01, 0x0a, 0x1c, 0x63, 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x2e, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0x2e, + 0x76, 0x32, 0x42, 0x17, 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x54, 0x61, 0x62, 0x6c, + 0x65, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3d, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, + 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, + 0x61, 0x70, 0x69, 0x73, 0x2f, 0x62, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x2f, 0x61, 0x64, + 0x6d, 0x69, 0x6e, 0x2f, 0x76, 0x32, 0x3b, 0x61, 0x64, 0x6d, 0x69, 0x6e, 0xaa, 0x02, 0x1e, 0x47, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x2e, 0x42, 0x69, 0x67, 0x74, + 0x61, 0x62, 0x6c, 0x65, 0x2e, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, + 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x5c, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x5c, 0x42, 0x69, 0x67, + 0x74, 0x61, 0x62, 0x6c, 0x65, 0x5c, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x5c, 0x56, 0x32, 0xea, 0x02, + 0x22, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x3a, 0x3a, 0x43, 0x6c, 0x6f, 0x75, 0x64, 0x3a, 0x3a, + 0x42, 0x69, 0x67, 0x74, 0x61, 0x62, 0x6c, 0x65, 0x3a, 0x3a, 0x41, 0x64, 0x6d, 0x69, 0x6e, 0x3a, + 0x3a, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2993,47 +2993,47 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_rawDescGZIP() []by var file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes = make([]protoimpl.MessageInfo, 31) var file_google_bigtable_admin_v2_bigtable_table_admin_proto_goTypes = []interface{}{ - (*CreateTableRequest)(nil), // 0: google.bigtable.admin.v2.CreateTableRequest - (*CreateTableFromSnapshotRequest)(nil), // 1: google.bigtable.admin.v2.CreateTableFromSnapshotRequest - (*DropRowRangeRequest)(nil), // 2: google.bigtable.admin.v2.DropRowRangeRequest - (*ListTablesRequest)(nil), // 3: google.bigtable.admin.v2.ListTablesRequest - (*ListTablesResponse)(nil), // 4: 
google.bigtable.admin.v2.ListTablesResponse - (*GetTableRequest)(nil), // 5: google.bigtable.admin.v2.GetTableRequest - (*DeleteTableRequest)(nil), // 6: google.bigtable.admin.v2.DeleteTableRequest - (*ModifyColumnFamiliesRequest)(nil), // 7: google.bigtable.admin.v2.ModifyColumnFamiliesRequest - (*GenerateConsistencyTokenRequest)(nil), // 8: google.bigtable.admin.v2.GenerateConsistencyTokenRequest - (*GenerateConsistencyTokenResponse)(nil), // 9: google.bigtable.admin.v2.GenerateConsistencyTokenResponse - (*CheckConsistencyRequest)(nil), // 10: google.bigtable.admin.v2.CheckConsistencyRequest - (*CheckConsistencyResponse)(nil), // 11: google.bigtable.admin.v2.CheckConsistencyResponse - (*SnapshotTableRequest)(nil), // 12: google.bigtable.admin.v2.SnapshotTableRequest - (*GetSnapshotRequest)(nil), // 13: google.bigtable.admin.v2.GetSnapshotRequest - (*ListSnapshotsRequest)(nil), // 14: google.bigtable.admin.v2.ListSnapshotsRequest - (*ListSnapshotsResponse)(nil), // 15: google.bigtable.admin.v2.ListSnapshotsResponse - (*DeleteSnapshotRequest)(nil), // 16: google.bigtable.admin.v2.DeleteSnapshotRequest - (*SnapshotTableMetadata)(nil), // 17: google.bigtable.admin.v2.SnapshotTableMetadata - (*CreateTableFromSnapshotMetadata)(nil), // 18: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata - (*CreateBackupRequest)(nil), // 19: google.bigtable.admin.v2.CreateBackupRequest - (*CreateBackupMetadata)(nil), // 20: google.bigtable.admin.v2.CreateBackupMetadata - (*GetBackupRequest)(nil), // 21: google.bigtable.admin.v2.GetBackupRequest - (*UpdateBackupRequest)(nil), // 22: google.bigtable.admin.v2.UpdateBackupRequest - (*DeleteBackupRequest)(nil), // 23: google.bigtable.admin.v2.DeleteBackupRequest - (*ListBackupsRequest)(nil), // 24: google.bigtable.admin.v2.ListBackupsRequest - (*ListBackupsResponse)(nil), // 25: google.bigtable.admin.v2.ListBackupsResponse - (*RestoreTableRequest)(nil), // 26: google.bigtable.admin.v2.RestoreTableRequest - (*RestoreTableMetadata)(nil), // 27: google.bigtable.admin.v2.RestoreTableMetadata - (*OptimizeRestoredTableMetadata)(nil), // 28: google.bigtable.admin.v2.OptimizeRestoredTableMetadata + (*RestoreTableRequest)(nil), // 0: google.bigtable.admin.v2.RestoreTableRequest + (*RestoreTableMetadata)(nil), // 1: google.bigtable.admin.v2.RestoreTableMetadata + (*OptimizeRestoredTableMetadata)(nil), // 2: google.bigtable.admin.v2.OptimizeRestoredTableMetadata + (*CreateTableRequest)(nil), // 3: google.bigtable.admin.v2.CreateTableRequest + (*CreateTableFromSnapshotRequest)(nil), // 4: google.bigtable.admin.v2.CreateTableFromSnapshotRequest + (*DropRowRangeRequest)(nil), // 5: google.bigtable.admin.v2.DropRowRangeRequest + (*ListTablesRequest)(nil), // 6: google.bigtable.admin.v2.ListTablesRequest + (*ListTablesResponse)(nil), // 7: google.bigtable.admin.v2.ListTablesResponse + (*GetTableRequest)(nil), // 8: google.bigtable.admin.v2.GetTableRequest + (*DeleteTableRequest)(nil), // 9: google.bigtable.admin.v2.DeleteTableRequest + (*ModifyColumnFamiliesRequest)(nil), // 10: google.bigtable.admin.v2.ModifyColumnFamiliesRequest + (*GenerateConsistencyTokenRequest)(nil), // 11: google.bigtable.admin.v2.GenerateConsistencyTokenRequest + (*GenerateConsistencyTokenResponse)(nil), // 12: google.bigtable.admin.v2.GenerateConsistencyTokenResponse + (*CheckConsistencyRequest)(nil), // 13: google.bigtable.admin.v2.CheckConsistencyRequest + (*CheckConsistencyResponse)(nil), // 14: google.bigtable.admin.v2.CheckConsistencyResponse + (*SnapshotTableRequest)(nil), // 15: 
google.bigtable.admin.v2.SnapshotTableRequest + (*GetSnapshotRequest)(nil), // 16: google.bigtable.admin.v2.GetSnapshotRequest + (*ListSnapshotsRequest)(nil), // 17: google.bigtable.admin.v2.ListSnapshotsRequest + (*ListSnapshotsResponse)(nil), // 18: google.bigtable.admin.v2.ListSnapshotsResponse + (*DeleteSnapshotRequest)(nil), // 19: google.bigtable.admin.v2.DeleteSnapshotRequest + (*SnapshotTableMetadata)(nil), // 20: google.bigtable.admin.v2.SnapshotTableMetadata + (*CreateTableFromSnapshotMetadata)(nil), // 21: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata + (*CreateBackupRequest)(nil), // 22: google.bigtable.admin.v2.CreateBackupRequest + (*CreateBackupMetadata)(nil), // 23: google.bigtable.admin.v2.CreateBackupMetadata + (*UpdateBackupRequest)(nil), // 24: google.bigtable.admin.v2.UpdateBackupRequest + (*GetBackupRequest)(nil), // 25: google.bigtable.admin.v2.GetBackupRequest + (*DeleteBackupRequest)(nil), // 26: google.bigtable.admin.v2.DeleteBackupRequest + (*ListBackupsRequest)(nil), // 27: google.bigtable.admin.v2.ListBackupsRequest + (*ListBackupsResponse)(nil), // 28: google.bigtable.admin.v2.ListBackupsResponse (*CreateTableRequest_Split)(nil), // 29: google.bigtable.admin.v2.CreateTableRequest.Split (*ModifyColumnFamiliesRequest_Modification)(nil), // 30: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification - (*Table)(nil), // 31: google.bigtable.admin.v2.Table - (Table_View)(0), // 32: google.bigtable.admin.v2.Table.View - (*durationpb.Duration)(nil), // 33: google.protobuf.Duration - (*Snapshot)(nil), // 34: google.bigtable.admin.v2.Snapshot - (*timestamppb.Timestamp)(nil), // 35: google.protobuf.Timestamp - (*Backup)(nil), // 36: google.bigtable.admin.v2.Backup - (*fieldmaskpb.FieldMask)(nil), // 37: google.protobuf.FieldMask - (RestoreSourceType)(0), // 38: google.bigtable.admin.v2.RestoreSourceType - (*BackupInfo)(nil), // 39: google.bigtable.admin.v2.BackupInfo - (*OperationProgress)(nil), // 40: google.bigtable.admin.v2.OperationProgress + (RestoreSourceType)(0), // 31: google.bigtable.admin.v2.RestoreSourceType + (*BackupInfo)(nil), // 32: google.bigtable.admin.v2.BackupInfo + (*OperationProgress)(nil), // 33: google.bigtable.admin.v2.OperationProgress + (*Table)(nil), // 34: google.bigtable.admin.v2.Table + (Table_View)(0), // 35: google.bigtable.admin.v2.Table.View + (*durationpb.Duration)(nil), // 36: google.protobuf.Duration + (*Snapshot)(nil), // 37: google.bigtable.admin.v2.Snapshot + (*timestamppb.Timestamp)(nil), // 38: google.protobuf.Timestamp + (*Backup)(nil), // 39: google.bigtable.admin.v2.Backup + (*fieldmaskpb.FieldMask)(nil), // 40: google.protobuf.FieldMask (*ColumnFamily)(nil), // 41: google.bigtable.admin.v2.ColumnFamily (*v1.GetIamPolicyRequest)(nil), // 42: google.iam.v1.GetIamPolicyRequest (*v1.SetIamPolicyRequest)(nil), // 43: google.iam.v1.SetIamPolicyRequest @@ -3044,72 +3044,72 @@ var file_google_bigtable_admin_v2_bigtable_table_admin_proto_goTypes = []interfa (*v1.TestIamPermissionsResponse)(nil), // 48: google.iam.v1.TestIamPermissionsResponse } var file_google_bigtable_admin_v2_bigtable_table_admin_proto_depIdxs = []int32{ - 31, // 0: google.bigtable.admin.v2.CreateTableRequest.table:type_name -> google.bigtable.admin.v2.Table - 29, // 1: google.bigtable.admin.v2.CreateTableRequest.initial_splits:type_name -> google.bigtable.admin.v2.CreateTableRequest.Split - 32, // 2: google.bigtable.admin.v2.ListTablesRequest.view:type_name -> google.bigtable.admin.v2.Table.View - 31, // 3: 
google.bigtable.admin.v2.ListTablesResponse.tables:type_name -> google.bigtable.admin.v2.Table - 32, // 4: google.bigtable.admin.v2.GetTableRequest.view:type_name -> google.bigtable.admin.v2.Table.View - 30, // 5: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications:type_name -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification - 33, // 6: google.bigtable.admin.v2.SnapshotTableRequest.ttl:type_name -> google.protobuf.Duration - 34, // 7: google.bigtable.admin.v2.ListSnapshotsResponse.snapshots:type_name -> google.bigtable.admin.v2.Snapshot - 12, // 8: google.bigtable.admin.v2.SnapshotTableMetadata.original_request:type_name -> google.bigtable.admin.v2.SnapshotTableRequest - 35, // 9: google.bigtable.admin.v2.SnapshotTableMetadata.request_time:type_name -> google.protobuf.Timestamp - 35, // 10: google.bigtable.admin.v2.SnapshotTableMetadata.finish_time:type_name -> google.protobuf.Timestamp - 1, // 11: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.original_request:type_name -> google.bigtable.admin.v2.CreateTableFromSnapshotRequest - 35, // 12: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time:type_name -> google.protobuf.Timestamp - 35, // 13: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time:type_name -> google.protobuf.Timestamp - 36, // 14: google.bigtable.admin.v2.CreateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup - 35, // 15: google.bigtable.admin.v2.CreateBackupMetadata.start_time:type_name -> google.protobuf.Timestamp - 35, // 16: google.bigtable.admin.v2.CreateBackupMetadata.end_time:type_name -> google.protobuf.Timestamp - 36, // 17: google.bigtable.admin.v2.UpdateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup - 37, // 18: google.bigtable.admin.v2.UpdateBackupRequest.update_mask:type_name -> google.protobuf.FieldMask - 36, // 19: google.bigtable.admin.v2.ListBackupsResponse.backups:type_name -> google.bigtable.admin.v2.Backup - 38, // 20: google.bigtable.admin.v2.RestoreTableMetadata.source_type:type_name -> google.bigtable.admin.v2.RestoreSourceType - 39, // 21: google.bigtable.admin.v2.RestoreTableMetadata.backup_info:type_name -> google.bigtable.admin.v2.BackupInfo - 40, // 22: google.bigtable.admin.v2.RestoreTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress - 40, // 23: google.bigtable.admin.v2.OptimizeRestoredTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress + 31, // 0: google.bigtable.admin.v2.RestoreTableMetadata.source_type:type_name -> google.bigtable.admin.v2.RestoreSourceType + 32, // 1: google.bigtable.admin.v2.RestoreTableMetadata.backup_info:type_name -> google.bigtable.admin.v2.BackupInfo + 33, // 2: google.bigtable.admin.v2.RestoreTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress + 33, // 3: google.bigtable.admin.v2.OptimizeRestoredTableMetadata.progress:type_name -> google.bigtable.admin.v2.OperationProgress + 34, // 4: google.bigtable.admin.v2.CreateTableRequest.table:type_name -> google.bigtable.admin.v2.Table + 29, // 5: google.bigtable.admin.v2.CreateTableRequest.initial_splits:type_name -> google.bigtable.admin.v2.CreateTableRequest.Split + 35, // 6: google.bigtable.admin.v2.ListTablesRequest.view:type_name -> google.bigtable.admin.v2.Table.View + 34, // 7: google.bigtable.admin.v2.ListTablesResponse.tables:type_name -> google.bigtable.admin.v2.Table + 35, // 8: google.bigtable.admin.v2.GetTableRequest.view:type_name -> google.bigtable.admin.v2.Table.View 
+ 30, // 9: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.modifications:type_name -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification + 36, // 10: google.bigtable.admin.v2.SnapshotTableRequest.ttl:type_name -> google.protobuf.Duration + 37, // 11: google.bigtable.admin.v2.ListSnapshotsResponse.snapshots:type_name -> google.bigtable.admin.v2.Snapshot + 15, // 12: google.bigtable.admin.v2.SnapshotTableMetadata.original_request:type_name -> google.bigtable.admin.v2.SnapshotTableRequest + 38, // 13: google.bigtable.admin.v2.SnapshotTableMetadata.request_time:type_name -> google.protobuf.Timestamp + 38, // 14: google.bigtable.admin.v2.SnapshotTableMetadata.finish_time:type_name -> google.protobuf.Timestamp + 4, // 15: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.original_request:type_name -> google.bigtable.admin.v2.CreateTableFromSnapshotRequest + 38, // 16: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.request_time:type_name -> google.protobuf.Timestamp + 38, // 17: google.bigtable.admin.v2.CreateTableFromSnapshotMetadata.finish_time:type_name -> google.protobuf.Timestamp + 39, // 18: google.bigtable.admin.v2.CreateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup + 38, // 19: google.bigtable.admin.v2.CreateBackupMetadata.start_time:type_name -> google.protobuf.Timestamp + 38, // 20: google.bigtable.admin.v2.CreateBackupMetadata.end_time:type_name -> google.protobuf.Timestamp + 39, // 21: google.bigtable.admin.v2.UpdateBackupRequest.backup:type_name -> google.bigtable.admin.v2.Backup + 40, // 22: google.bigtable.admin.v2.UpdateBackupRequest.update_mask:type_name -> google.protobuf.FieldMask + 39, // 23: google.bigtable.admin.v2.ListBackupsResponse.backups:type_name -> google.bigtable.admin.v2.Backup 41, // 24: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.create:type_name -> google.bigtable.admin.v2.ColumnFamily 41, // 25: google.bigtable.admin.v2.ModifyColumnFamiliesRequest.Modification.update:type_name -> google.bigtable.admin.v2.ColumnFamily - 0, // 26: google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:input_type -> google.bigtable.admin.v2.CreateTableRequest - 1, // 27: google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot:input_type -> google.bigtable.admin.v2.CreateTableFromSnapshotRequest - 3, // 28: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:input_type -> google.bigtable.admin.v2.ListTablesRequest - 5, // 29: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:input_type -> google.bigtable.admin.v2.GetTableRequest - 6, // 30: google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable:input_type -> google.bigtable.admin.v2.DeleteTableRequest - 7, // 31: google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:input_type -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest - 2, // 32: google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange:input_type -> google.bigtable.admin.v2.DropRowRangeRequest - 8, // 33: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:input_type -> google.bigtable.admin.v2.GenerateConsistencyTokenRequest - 10, // 34: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:input_type -> google.bigtable.admin.v2.CheckConsistencyRequest - 12, // 35: google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable:input_type -> google.bigtable.admin.v2.SnapshotTableRequest - 13, // 36: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:input_type -> google.bigtable.admin.v2.GetSnapshotRequest - 14, // 37: 
google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:input_type -> google.bigtable.admin.v2.ListSnapshotsRequest - 16, // 38: google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot:input_type -> google.bigtable.admin.v2.DeleteSnapshotRequest - 19, // 39: google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup:input_type -> google.bigtable.admin.v2.CreateBackupRequest - 21, // 40: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:input_type -> google.bigtable.admin.v2.GetBackupRequest - 22, // 41: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:input_type -> google.bigtable.admin.v2.UpdateBackupRequest - 23, // 42: google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup:input_type -> google.bigtable.admin.v2.DeleteBackupRequest - 24, // 43: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:input_type -> google.bigtable.admin.v2.ListBackupsRequest - 26, // 44: google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable:input_type -> google.bigtable.admin.v2.RestoreTableRequest + 3, // 26: google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:input_type -> google.bigtable.admin.v2.CreateTableRequest + 4, // 27: google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot:input_type -> google.bigtable.admin.v2.CreateTableFromSnapshotRequest + 6, // 28: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:input_type -> google.bigtable.admin.v2.ListTablesRequest + 8, // 29: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:input_type -> google.bigtable.admin.v2.GetTableRequest + 9, // 30: google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable:input_type -> google.bigtable.admin.v2.DeleteTableRequest + 10, // 31: google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:input_type -> google.bigtable.admin.v2.ModifyColumnFamiliesRequest + 5, // 32: google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange:input_type -> google.bigtable.admin.v2.DropRowRangeRequest + 11, // 33: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:input_type -> google.bigtable.admin.v2.GenerateConsistencyTokenRequest + 13, // 34: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:input_type -> google.bigtable.admin.v2.CheckConsistencyRequest + 15, // 35: google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable:input_type -> google.bigtable.admin.v2.SnapshotTableRequest + 16, // 36: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:input_type -> google.bigtable.admin.v2.GetSnapshotRequest + 17, // 37: google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:input_type -> google.bigtable.admin.v2.ListSnapshotsRequest + 19, // 38: google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot:input_type -> google.bigtable.admin.v2.DeleteSnapshotRequest + 22, // 39: google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup:input_type -> google.bigtable.admin.v2.CreateBackupRequest + 25, // 40: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:input_type -> google.bigtable.admin.v2.GetBackupRequest + 24, // 41: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:input_type -> google.bigtable.admin.v2.UpdateBackupRequest + 26, // 42: google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup:input_type -> google.bigtable.admin.v2.DeleteBackupRequest + 27, // 43: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:input_type -> google.bigtable.admin.v2.ListBackupsRequest + 0, // 44: google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable:input_type -> google.bigtable.admin.v2.RestoreTableRequest 42, // 45: 
google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy:input_type -> google.iam.v1.GetIamPolicyRequest 43, // 46: google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy:input_type -> google.iam.v1.SetIamPolicyRequest 44, // 47: google.bigtable.admin.v2.BigtableTableAdmin.TestIamPermissions:input_type -> google.iam.v1.TestIamPermissionsRequest - 31, // 48: google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:output_type -> google.bigtable.admin.v2.Table + 34, // 48: google.bigtable.admin.v2.BigtableTableAdmin.CreateTable:output_type -> google.bigtable.admin.v2.Table 45, // 49: google.bigtable.admin.v2.BigtableTableAdmin.CreateTableFromSnapshot:output_type -> google.longrunning.Operation - 4, // 50: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:output_type -> google.bigtable.admin.v2.ListTablesResponse - 31, // 51: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:output_type -> google.bigtable.admin.v2.Table + 7, // 50: google.bigtable.admin.v2.BigtableTableAdmin.ListTables:output_type -> google.bigtable.admin.v2.ListTablesResponse + 34, // 51: google.bigtable.admin.v2.BigtableTableAdmin.GetTable:output_type -> google.bigtable.admin.v2.Table 46, // 52: google.bigtable.admin.v2.BigtableTableAdmin.DeleteTable:output_type -> google.protobuf.Empty - 31, // 53: google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:output_type -> google.bigtable.admin.v2.Table + 34, // 53: google.bigtable.admin.v2.BigtableTableAdmin.ModifyColumnFamilies:output_type -> google.bigtable.admin.v2.Table 46, // 54: google.bigtable.admin.v2.BigtableTableAdmin.DropRowRange:output_type -> google.protobuf.Empty - 9, // 55: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:output_type -> google.bigtable.admin.v2.GenerateConsistencyTokenResponse - 11, // 56: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:output_type -> google.bigtable.admin.v2.CheckConsistencyResponse + 12, // 55: google.bigtable.admin.v2.BigtableTableAdmin.GenerateConsistencyToken:output_type -> google.bigtable.admin.v2.GenerateConsistencyTokenResponse + 14, // 56: google.bigtable.admin.v2.BigtableTableAdmin.CheckConsistency:output_type -> google.bigtable.admin.v2.CheckConsistencyResponse 45, // 57: google.bigtable.admin.v2.BigtableTableAdmin.SnapshotTable:output_type -> google.longrunning.Operation - 34, // 58: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:output_type -> google.bigtable.admin.v2.Snapshot - 15, // 59: google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:output_type -> google.bigtable.admin.v2.ListSnapshotsResponse + 37, // 58: google.bigtable.admin.v2.BigtableTableAdmin.GetSnapshot:output_type -> google.bigtable.admin.v2.Snapshot + 18, // 59: google.bigtable.admin.v2.BigtableTableAdmin.ListSnapshots:output_type -> google.bigtable.admin.v2.ListSnapshotsResponse 46, // 60: google.bigtable.admin.v2.BigtableTableAdmin.DeleteSnapshot:output_type -> google.protobuf.Empty 45, // 61: google.bigtable.admin.v2.BigtableTableAdmin.CreateBackup:output_type -> google.longrunning.Operation - 36, // 62: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:output_type -> google.bigtable.admin.v2.Backup - 36, // 63: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:output_type -> google.bigtable.admin.v2.Backup + 39, // 62: google.bigtable.admin.v2.BigtableTableAdmin.GetBackup:output_type -> google.bigtable.admin.v2.Backup + 39, // 63: google.bigtable.admin.v2.BigtableTableAdmin.UpdateBackup:output_type -> google.bigtable.admin.v2.Backup 46, // 64: 
google.bigtable.admin.v2.BigtableTableAdmin.DeleteBackup:output_type -> google.protobuf.Empty - 25, // 65: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:output_type -> google.bigtable.admin.v2.ListBackupsResponse + 28, // 65: google.bigtable.admin.v2.BigtableTableAdmin.ListBackups:output_type -> google.bigtable.admin.v2.ListBackupsResponse 45, // 66: google.bigtable.admin.v2.BigtableTableAdmin.RestoreTable:output_type -> google.longrunning.Operation 47, // 67: google.bigtable.admin.v2.BigtableTableAdmin.GetIamPolicy:output_type -> google.iam.v1.Policy 47, // 68: google.bigtable.admin.v2.BigtableTableAdmin.SetIamPolicy:output_type -> google.iam.v1.Policy @@ -3130,7 +3130,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { file_google_bigtable_admin_v2_table_proto_init() if !protoimpl.UnsafeEnabled { file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTableRequest); i { + switch v := v.(*RestoreTableRequest); i { case 0: return &v.state case 1: @@ -3142,7 +3142,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTableFromSnapshotRequest); i { + switch v := v.(*RestoreTableMetadata); i { case 0: return &v.state case 1: @@ -3154,7 +3154,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DropRowRangeRequest); i { + switch v := v.(*OptimizeRestoredTableMetadata); i { case 0: return &v.state case 1: @@ -3166,7 +3166,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListTablesRequest); i { + switch v := v.(*CreateTableRequest); i { case 0: return &v.state case 1: @@ -3178,7 +3178,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListTablesResponse); i { + switch v := v.(*CreateTableFromSnapshotRequest); i { case 0: return &v.state case 1: @@ -3190,7 +3190,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetTableRequest); i { + switch v := v.(*DropRowRangeRequest); i { case 0: return &v.state case 1: @@ -3202,7 +3202,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteTableRequest); i { + switch v := v.(*ListTablesRequest); i { case 0: return &v.state case 1: @@ -3214,7 +3214,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ModifyColumnFamiliesRequest); i { + switch v := v.(*ListTablesResponse); i { case 0: return &v.state case 1: @@ -3226,7 +3226,7 @@ func 
file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GenerateConsistencyTokenRequest); i { + switch v := v.(*GetTableRequest); i { case 0: return &v.state case 1: @@ -3238,7 +3238,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GenerateConsistencyTokenResponse); i { + switch v := v.(*DeleteTableRequest); i { case 0: return &v.state case 1: @@ -3250,7 +3250,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckConsistencyRequest); i { + switch v := v.(*ModifyColumnFamiliesRequest); i { case 0: return &v.state case 1: @@ -3262,7 +3262,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CheckConsistencyResponse); i { + switch v := v.(*GenerateConsistencyTokenRequest); i { case 0: return &v.state case 1: @@ -3274,7 +3274,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SnapshotTableRequest); i { + switch v := v.(*GenerateConsistencyTokenResponse); i { case 0: return &v.state case 1: @@ -3286,7 +3286,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetSnapshotRequest); i { + switch v := v.(*CheckConsistencyRequest); i { case 0: return &v.state case 1: @@ -3298,7 +3298,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListSnapshotsRequest); i { + switch v := v.(*CheckConsistencyResponse); i { case 0: return &v.state case 1: @@ -3310,7 +3310,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListSnapshotsResponse); i { + switch v := v.(*SnapshotTableRequest); i { case 0: return &v.state case 1: @@ -3322,7 +3322,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteSnapshotRequest); i { + switch v := v.(*GetSnapshotRequest); i { case 0: return &v.state case 1: @@ -3334,7 +3334,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SnapshotTableMetadata); i { + switch v := v.(*ListSnapshotsRequest); i { case 0: return &v.state case 1: @@ -3346,7 +3346,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } 
file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateTableFromSnapshotMetadata); i { + switch v := v.(*ListSnapshotsResponse); i { case 0: return &v.state case 1: @@ -3358,7 +3358,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateBackupRequest); i { + switch v := v.(*DeleteSnapshotRequest); i { case 0: return &v.state case 1: @@ -3370,7 +3370,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*CreateBackupMetadata); i { + switch v := v.(*SnapshotTableMetadata); i { case 0: return &v.state case 1: @@ -3382,7 +3382,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetBackupRequest); i { + switch v := v.(*CreateTableFromSnapshotMetadata); i { case 0: return &v.state case 1: @@ -3394,7 +3394,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*UpdateBackupRequest); i { + switch v := v.(*CreateBackupRequest); i { case 0: return &v.state case 1: @@ -3406,7 +3406,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteBackupRequest); i { + switch v := v.(*CreateBackupMetadata); i { case 0: return &v.state case 1: @@ -3418,7 +3418,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListBackupsRequest); i { + switch v := v.(*UpdateBackupRequest); i { case 0: return &v.state case 1: @@ -3430,7 +3430,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ListBackupsResponse); i { + switch v := v.(*GetBackupRequest); i { case 0: return &v.state case 1: @@ -3442,7 +3442,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RestoreTableRequest); i { + switch v := v.(*DeleteBackupRequest); i { case 0: return &v.state case 1: @@ -3454,7 +3454,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RestoreTableMetadata); i { + switch v := v.(*ListBackupsRequest); i { case 0: return &v.state case 1: @@ -3466,7 +3466,7 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { - switch v := 
v.(*OptimizeRestoredTableMetadata); i { + switch v := v.(*ListBackupsResponse); i { case 0: return &v.state case 1: @@ -3502,16 +3502,16 @@ func file_google_bigtable_admin_v2_bigtable_table_admin_proto_init() { } } } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[2].OneofWrappers = []interface{}{ - (*DropRowRangeRequest_RowKeyPrefix)(nil), - (*DropRowRangeRequest_DeleteAllDataFromTable)(nil), - } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[26].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[0].OneofWrappers = []interface{}{ (*RestoreTableRequest_Backup)(nil), } - file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[27].OneofWrappers = []interface{}{ + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[1].OneofWrappers = []interface{}{ (*RestoreTableMetadata_BackupInfo)(nil), } + file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[5].OneofWrappers = []interface{}{ + (*DropRowRangeRequest_RowKeyPrefix)(nil), + (*DropRowRangeRequest_DeleteAllDataFromTable)(nil), + } file_google_bigtable_admin_v2_bigtable_table_admin_proto_msgTypes[30].OneofWrappers = []interface{}{ (*ModifyColumnFamiliesRequest_Modification_Create)(nil), (*ModifyColumnFamiliesRequest_Modification_Update)(nil), @@ -3619,14 +3619,14 @@ type BigtableTableAdminClient interface { // recommended for production use. It is not subject to any SLA or deprecation // policy. DeleteSnapshot(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*emptypb.Empty, error) - // Starts creating a new Cloud Bigtable Backup. The returned backup + // Starts creating a new Cloud Bigtable Backup. The returned backup // [long-running operation][google.longrunning.Operation] can be used to // track creation of the backup. The // [metadata][google.longrunning.Operation.metadata] field type is // [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The // [response][google.longrunning.Operation.response] field type is - // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the - // returned operation will stop the creation and delete the backup. + // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the returned operation will stop the + // creation and delete the backup. CreateBackup(ctx context.Context, in *CreateBackupRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) // Gets metadata on a pending or completed Cloud Bigtable Backup. GetBackup(ctx context.Context, in *GetBackupRequest, opts ...grpc.CallOption) (*Backup, error) @@ -3638,22 +3638,22 @@ type BigtableTableAdminClient interface { // backups. ListBackups(ctx context.Context, in *ListBackupsRequest, opts ...grpc.CallOption) (*ListBackupsResponse, error) // Create a new table by restoring from a completed backup. The new table - // must be in the same instance as the instance containing the backup. The + // must be in the same instance as the instance containing the backup. The // returned table [long-running operation][google.longrunning.Operation] can - // be used to track the progress of the operation, and to cancel it. The + // be used to track the progress of the operation, and to cancel it. The // [metadata][google.longrunning.Operation.metadata] field type is - // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. 
The // [response][google.longrunning.Operation.response] type is // [Table][google.bigtable.admin.v2.Table], if successful. RestoreTable(ctx context.Context, in *RestoreTableRequest, opts ...grpc.CallOption) (*longrunning.Operation, error) - // Gets the access control policy for a resource. + // Gets the access control policy for a Table or Backup resource. // Returns an empty policy if the resource exists but does not have a policy // set. GetIamPolicy(ctx context.Context, in *v1.GetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) // Sets the access control policy on a Table or Backup resource. // Replaces any existing policy. SetIamPolicy(ctx context.Context, in *v1.SetIamPolicyRequest, opts ...grpc.CallOption) (*v1.Policy, error) - // Returns permissions that the caller has on the specified table resource. + // Returns permissions that the caller has on the specified Table or Backup resource. TestIamPermissions(ctx context.Context, in *v1.TestIamPermissionsRequest, opts ...grpc.CallOption) (*v1.TestIamPermissionsResponse, error) } @@ -3935,14 +3935,14 @@ type BigtableTableAdminServer interface { // recommended for production use. It is not subject to any SLA or deprecation // policy. DeleteSnapshot(context.Context, *DeleteSnapshotRequest) (*emptypb.Empty, error) - // Starts creating a new Cloud Bigtable Backup. The returned backup + // Starts creating a new Cloud Bigtable Backup. The returned backup // [long-running operation][google.longrunning.Operation] can be used to // track creation of the backup. The // [metadata][google.longrunning.Operation.metadata] field type is // [CreateBackupMetadata][google.bigtable.admin.v2.CreateBackupMetadata]. The // [response][google.longrunning.Operation.response] field type is - // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the - // returned operation will stop the creation and delete the backup. + // [Backup][google.bigtable.admin.v2.Backup], if successful. Cancelling the returned operation will stop the + // creation and delete the backup. CreateBackup(context.Context, *CreateBackupRequest) (*longrunning.Operation, error) // Gets metadata on a pending or completed Cloud Bigtable Backup. GetBackup(context.Context, *GetBackupRequest) (*Backup, error) @@ -3954,22 +3954,22 @@ type BigtableTableAdminServer interface { // backups. ListBackups(context.Context, *ListBackupsRequest) (*ListBackupsResponse, error) // Create a new table by restoring from a completed backup. The new table - // must be in the same instance as the instance containing the backup. The + // must be in the same instance as the instance containing the backup. The // returned table [long-running operation][google.longrunning.Operation] can - // be used to track the progress of the operation, and to cancel it. The + // be used to track the progress of the operation, and to cancel it. The // [metadata][google.longrunning.Operation.metadata] field type is - // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The + // [RestoreTableMetadata][google.bigtable.admin.RestoreTableMetadata]. The // [response][google.longrunning.Operation.response] type is // [Table][google.bigtable.admin.v2.Table], if successful. RestoreTable(context.Context, *RestoreTableRequest) (*longrunning.Operation, error) - // Gets the access control policy for a resource. + // Gets the access control policy for a Table or Backup resource. // Returns an empty policy if the resource exists but does not have a policy // set. 
GetIamPolicy(context.Context, *v1.GetIamPolicyRequest) (*v1.Policy, error) // Sets the access control policy on a Table or Backup resource. // Replaces any existing policy. SetIamPolicy(context.Context, *v1.SetIamPolicyRequest) (*v1.Policy, error) - // Returns permissions that the caller has on the specified table resource. + // Returns permissions that the caller has on the specified Table or Backup resource. TestIamPermissions(context.Context, *v1.TestIamPermissionsRequest) (*v1.TestIamPermissionsResponse, error) } diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml index 055480b9ef8..7348c50c0c3 100644 --- a/vendor/gopkg.in/yaml.v2/.travis.yml +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -11,6 +11,7 @@ go: - "1.11.x" - "1.12.x" - "1.13.x" + - "1.14.x" - "tip" go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go index d2c2308f1f4..acf71402cf3 100644 --- a/vendor/gopkg.in/yaml.v2/apic.go +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -79,6 +79,8 @@ func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { parser.encoding = encoding } +var disableLineWrapping = false + // Create a new emitter object. func yaml_emitter_initialize(emitter *yaml_emitter_t) { *emitter = yaml_emitter_t{ @@ -86,7 +88,9 @@ func yaml_emitter_initialize(emitter *yaml_emitter_t) { raw_buffer: make([]byte, 0, output_raw_buffer_size), states: make([]yaml_emitter_state_t, 0, initial_stack_size), events: make([]yaml_event_t, 0, initial_queue_size), - best_width: -1, + } + if disableLineWrapping { + emitter.best_width = -1 } } diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod index 1934e876945..2cbb85aeacd 100644 --- a/vendor/gopkg.in/yaml.v2/go.mod +++ b/vendor/gopkg.in/yaml.v2/go.mod @@ -1,5 +1,5 @@ -module "gopkg.in/yaml.v2" +module gopkg.in/yaml.v2 -require ( - "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 -) +go 1.15 + +require gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go index 89650e293ac..30813884c06 100644 --- a/vendor/gopkg.in/yaml.v2/yaml.go +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -175,7 +175,7 @@ func unmarshal(in []byte, out interface{}, strict bool) (err error) { // Zero valued structs will be omitted if all their public // fields are zero, unless they implement an IsZero // method (see the IsZeroer interface type), in which -// case the field will be included if that method returns true. +// case the field will be excluded if IsZero returns true. // // flow Marshal using a flow style (useful for structs, // sequences and maps). @@ -464,3 +464,15 @@ func isZero(v reflect.Value) bool { } return false } + +// FutureLineWrap globally disables line wrapping when encoding long strings. +// This is a temporary and thus deprecated method introduced to facilitate +// migration towards v3, which offers more control of line lengths on +// individual encodings, and has a default matching the behavior introduced +// by this function. +// +// The default formatting of v2 was erroneously changed in v2.3.0 and reverted +// in v2.4.0, at which point this function was introduced to help migration.
+func FutureLineWrap() { + disableLineWrapping = true +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 050a1634946..ae1546f72aa 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,4 +1,4 @@ -# cloud.google.com/go v0.66.0 +# cloud.google.com/go v0.72.0 cloud.google.com/go cloud.google.com/go/compute/metadata cloud.google.com/go/iam @@ -49,16 +49,16 @@ github.com/alecthomas/kong # github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 github.com/alecthomas/template github.com/alecthomas/template/parse -# github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d +# github.com/alecthomas/units v0.0.0-20201120081800-1786d5ef83d4 github.com/alecthomas/units # github.com/apache/thrift v0.13.0 github.com/apache/thrift/lib/go/thrift -# github.com/armon/go-metrics v0.3.3 +# github.com/armon/go-metrics v0.3.6 github.com/armon/go-metrics github.com/armon/go-metrics/prometheus # github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef github.com/asaskevich/govalidator -# github.com/aws/aws-sdk-go v1.35.31 +# github.com/aws/aws-sdk-go v1.36.15 github.com/aws/aws-sdk-go/aws github.com/aws/aws-sdk-go/aws/arn github.com/aws/aws-sdk-go/aws/awserr @@ -110,8 +110,6 @@ github.com/aws/aws-sdk-go/service/sts github.com/aws/aws-sdk-go/service/sts/stsiface # github.com/beorn7/perks v1.0.1 github.com/beorn7/perks/quantile -# github.com/blang/semver v3.5.1+incompatible -github.com/blang/semver # github.com/bombsimon/wsl/v3 v3.1.0 github.com/bombsimon/wsl/v3 # github.com/bradfitz/gomemcache v0.0.0-20190913173617-a41fca850d0b => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab @@ -138,7 +136,7 @@ github.com/coreos/go-semver/semver github.com/coreos/go-systemd/journal # github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f github.com/coreos/pkg/capnslog -# github.com/cortexproject/cortex v1.6.0 +# github.com/cortexproject/cortex v1.6.1-0.20210205171041-527f9b58b93c ## explicit github.com/cortexproject/cortex/integration/e2e github.com/cortexproject/cortex/integration/e2e/db @@ -195,6 +193,7 @@ github.com/cortexproject/cortex/pkg/querier/lazyquery github.com/cortexproject/cortex/pkg/querier/queryrange github.com/cortexproject/cortex/pkg/querier/series github.com/cortexproject/cortex/pkg/querier/stats +github.com/cortexproject/cortex/pkg/querier/tenantfederation github.com/cortexproject/cortex/pkg/querier/worker github.com/cortexproject/cortex/pkg/ring github.com/cortexproject/cortex/pkg/ring/client @@ -233,6 +232,8 @@ github.com/cortexproject/cortex/pkg/util/grpc/healthcheck github.com/cortexproject/cortex/pkg/util/grpcclient github.com/cortexproject/cortex/pkg/util/grpcutil github.com/cortexproject/cortex/pkg/util/limiter +github.com/cortexproject/cortex/pkg/util/log +github.com/cortexproject/cortex/pkg/util/math github.com/cortexproject/cortex/pkg/util/middleware github.com/cortexproject/cortex/pkg/util/modules github.com/cortexproject/cortex/pkg/util/process @@ -396,7 +397,7 @@ github.com/golang/protobuf/ptypes/empty github.com/golang/protobuf/ptypes/struct github.com/golang/protobuf/ptypes/timestamp github.com/golang/protobuf/ptypes/wrappers -# github.com/golang/snappy v0.0.2 +# github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3 ## explicit github.com/golang/snappy # github.com/golangci/check v0.0.0-20180506172741-cfe4005ccda2 @@ -465,7 +466,7 @@ github.com/golangci/unconvert github.com/google/addlicense # github.com/google/btree v1.0.0 github.com/google/btree -# github.com/google/go-cmp v0.5.2 +# github.com/google/go-cmp v0.5.4 
github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags @@ -475,26 +476,13 @@ github.com/google/go-cmp/cmp/internal/value github.com/google/go-github/github # github.com/google/go-querystring v1.0.0 github.com/google/go-querystring/query -# github.com/google/pprof v0.0.0-20201117184057-ae444373da19 +# github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2 github.com/google/pprof/profile -# github.com/google/uuid v1.1.1 +# github.com/google/uuid v1.1.2 ## explicit github.com/google/uuid # github.com/googleapis/gax-go/v2 v2.0.5 github.com/googleapis/gax-go/v2 -# github.com/gophercloud/gophercloud v0.14.0 -github.com/gophercloud/gophercloud -github.com/gophercloud/gophercloud/openstack -github.com/gophercloud/gophercloud/openstack/identity/v2/tenants -github.com/gophercloud/gophercloud/openstack/identity/v2/tokens -github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/ec2tokens -github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/oauth1 -github.com/gophercloud/gophercloud/openstack/identity/v3/tokens -github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts -github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers -github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects -github.com/gophercloud/gophercloud/openstack/utils -github.com/gophercloud/gophercloud/pagination # github.com/gorilla/handlers v1.4.2 github.com/gorilla/handlers # github.com/gorilla/mux v1.7.4 @@ -521,6 +509,7 @@ github.com/grpc-ecosystem/go-grpc-middleware/util/metautils github.com/grpc-ecosystem/go-grpc-prometheus github.com/grpc-ecosystem/go-grpc-prometheus/packages/grpcstatus # github.com/grpc-ecosystem/grpc-gateway v1.16.0 +## explicit github.com/grpc-ecosystem/grpc-gateway/internal github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options github.com/grpc-ecosystem/grpc-gateway/runtime @@ -530,7 +519,7 @@ github.com/grpc-ecosystem/grpc-gateway/utilities github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc # github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed github.com/hailocab/go-hostpool -# github.com/hashicorp/consul/api v1.7.0 +# github.com/hashicorp/consul/api v1.8.1 github.com/hashicorp/consul/api # github.com/hashicorp/errwrap v1.0.0 github.com/hashicorp/errwrap @@ -570,7 +559,7 @@ github.com/hashicorp/hcl/json/scanner github.com/hashicorp/hcl/json/token # github.com/hashicorp/memberlist v0.2.2 github.com/hashicorp/memberlist -# github.com/hashicorp/serf v0.9.3 +# github.com/hashicorp/serf v0.9.5 github.com/hashicorp/serf/coordinate # github.com/hashicorp/yamux v0.0.0-20190923154419-df201c70410d github.com/hashicorp/yamux @@ -737,7 +726,7 @@ github.com/nbutton23/zxcvbn-go/match github.com/nbutton23/zxcvbn-go/matching github.com/nbutton23/zxcvbn-go/scoring github.com/nbutton23/zxcvbn-go/utils/math -# github.com/ncw/swift v1.0.50 +# github.com/ncw/swift v1.0.52 github.com/ncw/swift # github.com/nishanths/exhaustive v0.0.0-20200525081945-8e46705b6132 github.com/nishanths/exhaustive @@ -751,6 +740,7 @@ github.com/olekukonko/tablewriter # github.com/open-telemetry/opentelemetry-proto v0.4.0 ## explicit github.com/open-telemetry/opentelemetry-proto/gen/go/common/v1 +github.com/open-telemetry/opentelemetry-proto/gen/go/metrics/v1 github.com/open-telemetry/opentelemetry-proto/gen/go/resource/v1 github.com/open-telemetry/opentelemetry-proto/gen/go/trace/v1 # github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df => github.com/pracucci/go-grpc 
v0.0.0-20201022134131-ef559b8db645 @@ -831,7 +821,7 @@ github.com/prometheus/alertmanager/store github.com/prometheus/alertmanager/template github.com/prometheus/alertmanager/types github.com/prometheus/alertmanager/ui -# github.com/prometheus/client_golang v1.8.0 +# github.com/prometheus/client_golang v1.9.0 ## explicit github.com/prometheus/client_golang/api github.com/prometheus/client_golang/api/prometheus/v1 @@ -858,7 +848,7 @@ github.com/prometheus/node_exporter/https github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.8.2-0.20201119181812-c8f810083d3f +# github.com/prometheus/prometheus v1.8.2-0.20210124145330-b5dfa2414b9e ## explicit github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -993,7 +983,7 @@ github.com/tcnksm/go-latest github.com/tdakkota/asciicheck # github.com/tetafro/godot v0.4.2 github.com/tetafro/godot -# github.com/thanos-io/thanos v0.13.1-0.20201130180807-84afc97e7d58 +# github.com/thanos-io/thanos v0.13.1-0.20210204123931-82545cdd16fe github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/indexheader github.com/thanos-io/thanos/pkg/block/metadata @@ -1004,6 +994,7 @@ github.com/thanos-io/thanos/pkg/compact/downsample github.com/thanos-io/thanos/pkg/component github.com/thanos-io/thanos/pkg/discovery/cache github.com/thanos-io/thanos/pkg/discovery/dns +github.com/thanos-io/thanos/pkg/discovery/dns/godns github.com/thanos-io/thanos/pkg/discovery/dns/miekgdns github.com/thanos-io/thanos/pkg/errutil github.com/thanos-io/thanos/pkg/extprom @@ -1074,7 +1065,7 @@ github.com/ultraware/funlen github.com/ultraware/whitespace # github.com/uudashr/gocognit v1.0.1 github.com/uudashr/gocognit -# github.com/weaveworks/common v0.0.0-20201119133501-0619918236ec +# github.com/weaveworks/common v0.0.0-20210112142934-23c8d7fa6120 ## explicit github.com/weaveworks/common/aws github.com/weaveworks/common/errors @@ -1187,7 +1178,7 @@ go.mongodb.org/mongo-driver/bson/bsonrw go.mongodb.org/mongo-driver/bson/bsontype go.mongodb.org/mongo-driver/bson/primitive go.mongodb.org/mongo-driver/x/bsonx/bsoncore -# go.opencensus.io v0.22.4 +# go.opencensus.io v0.22.5 ## explicit go.opencensus.io go.opencensus.io/internal @@ -1279,7 +1270,7 @@ go.uber.org/zap/internal/bufferpool go.uber.org/zap/internal/color go.uber.org/zap/internal/exit go.uber.org/zap/zapcore -# golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 +# golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9 golang.org/x/crypto/argon2 golang.org/x/crypto/bcrypt golang.org/x/crypto/blake2b @@ -1294,7 +1285,7 @@ golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.0.0-20201110031124-69a78807bb2b +# golang.org/x/net v0.0.0-20201224014010-6772e930b67b golang.org/x/net/bpf golang.org/x/net/context golang.org/x/net/context/ctxhttp @@ -1313,16 +1304,16 @@ golang.org/x/net/ipv6 golang.org/x/net/netutil golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58 +# golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5 golang.org/x/oauth2 golang.org/x/oauth2/google golang.org/x/oauth2/internal golang.org/x/oauth2/jws golang.org/x/oauth2/jwt -# golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9 +# golang.org/x/sync v0.0.0-20201207232520-09787c993a3a golang.org/x/sync/errgroup golang.org/x/sync/semaphore -# golang.org/x/sys v0.0.0-20201119102817-f84b799fce68 
+# golang.org/x/sys v0.0.0-20201223074533-0d417f636930 golang.org/x/sys/cpu golang.org/x/sys/internal/unsafeheader golang.org/x/sys/unix @@ -1335,10 +1326,10 @@ golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm golang.org/x/text/width -# golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e +# golang.org/x/time v0.0.0-20201208040808-7e3f01d25324 ## explicit golang.org/x/time/rate -# golang.org/x/tools v0.0.0-20201119054027-25dc3e1ccc3c +# golang.org/x/tools v0.0.0-20201228162255-34cd474b9958 golang.org/x/tools/cmd/goimports golang.org/x/tools/go/analysis golang.org/x/tools/go/analysis/passes/asmdecl @@ -1405,7 +1396,7 @@ golang.org/x/tools/internal/typesinternal # golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 golang.org/x/xerrors golang.org/x/xerrors/internal -# google.golang.org/api v0.35.0 +# google.golang.org/api v0.36.0 ## explicit google.golang.org/api/cloudresourcemanager/v1 google.golang.org/api/googleapi @@ -1423,7 +1414,7 @@ google.golang.org/api/transport/grpc google.golang.org/api/transport/http google.golang.org/api/transport/http/internal/propagation google.golang.org/api/transport/internal/dca -# google.golang.org/appengine v1.6.6 +# google.golang.org/appengine v1.6.7 google.golang.org/appengine google.golang.org/appengine/internal google.golang.org/appengine/internal/app_identity @@ -1436,8 +1427,7 @@ google.golang.org/appengine/internal/socket google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/socket google.golang.org/appengine/urlfetch -# google.golang.org/genproto v0.0.0-20201028140639-c77dae4b0522 -## explicit +# google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/bigtable/admin/v2 @@ -1448,7 +1438,7 @@ google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/status google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/protobuf/field_mask -# google.golang.org/grpc v1.33.1 => google.golang.org/grpc v1.29.1 +# google.golang.org/grpc v1.33.2 => google.golang.org/grpc v1.29.1 ## explicit google.golang.org/grpc google.golang.org/grpc/attributes @@ -1557,7 +1547,7 @@ gopkg.in/fsnotify/fsnotify.v1 gopkg.in/inf.v0 # gopkg.in/ini.v1 v1.57.0 gopkg.in/ini.v1 -# gopkg.in/yaml.v2 v2.3.0 +# gopkg.in/yaml.v2 v2.4.0 ## explicit gopkg.in/yaml.v2 # gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 @@ -1615,6 +1605,7 @@ sourcegraph.com/sqs/pbtypes # github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab # github.com/opentracing-contrib/go-grpc => github.com/pracucci/go-grpc v0.0.0-20201022134131-ef559b8db645 # github.com/satori/go.uuid => github.com/satori/go.uuid v1.2.0 +# k8s.io/api => k8s.io/api v0.19.4 # k8s.io/client-go => k8s.io/client-go v0.19.2 # github.com/prometheus/prometheus/discovery/config => ./vendor-fix/github.com/prometheus/prometheus/discovery/config # github.com/go-openapi/errors => github.com/go-openapi/errors v0.19.4 From 48fce8c322a029900c727430a8662b796d8c51c9 Mon Sep 17 00:00:00 2001 From: Martin Disibio Date: Mon, 8 Feb 2021 13:32:09 -0500 Subject: [PATCH 2/6] Serve new cortex memberlist page on /memberlist --- cmd/tempo/app/modules.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cmd/tempo/app/modules.go b/cmd/tempo/app/modules.go index 125f55e1360..bc052a2cdd2 100644 --- a/cmd/tempo/app/modules.go +++ b/cmd/tempo/app/modules.go @@ -232,6 
+232,8 @@ func (t *App) initMemberlistKV() (services.Service, error) { t.cfg.Distributor.DistributorRing.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV t.cfg.Compactor.ShardingRing.KVStore.MemberlistKV = t.memberlistKV.GetMemberlistKV + t.server.HTTP.Handle("/memberlist", t.memberlistKV) + return t.memberlistKV, nil } From 0cde8c3d9f6d8d32330124177c51c048d8c89769 Mon Sep 17 00:00:00 2001 From: Martin Disibio Date: Mon, 8 Feb 2021 13:42:23 -0500 Subject: [PATCH 3/6] Update changelog --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index b0c803ff149..93fec2ea7e2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ * [ENHANCEMENT] Add S3 options region and forcepathstyle [#431](https://github.com/grafana/tempo/issues/431) * [ENHANCEMENT] Add exhaustive search to combine traces from all blocks in the backend. [#489](https://github.com/grafana/tempo/pull/489) * [ENHANCEMENT] Add per-tenant block retention [#77](https://github.com/grafana/tempo/issues/77) -* [BUGFIX] Upgrade cortex dependency to 1.6 to address issue with forgetting ring membership [#442](https://github.com/grafana/tempo/pull/442) +* [BUGFIX] Upgrade cortex dependency to v1.7.0-rc.0+ to address issue with forgetting ring membership [#442](https://github.com/grafana/tempo/pull/442) [#512](https://github.com/grafana/tempo/pull/512) * [BUGFIX] No longer raise the `tempodb_blocklist_poll_errors_total` metric if a block doesn't have meta or compacted meta. [#481](https://github.com/grafana/tempo/pull/481) ## v0.5.0 From ab29f1c5013d7920f979eebe43a2c0a5d962e5fd Mon Sep 17 00:00:00 2001 From: Martin Disibio Date: Mon, 8 Feb 2021 13:51:24 -0500 Subject: [PATCH 4/6] Fix vendor --- go.mod | 1 - vendor/modules.txt | 1 - 2 files changed, 2 deletions(-) diff --git a/go.mod b/go.mod index fab43cb8965..b3c82874566 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,6 @@ require ( github.com/google/uuid v1.1.2 github.com/gorilla/mux v1.7.4 github.com/grafana/loki v1.3.0 - github.com/grpc-ecosystem/grpc-gateway v1.16.0 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 github.com/hashicorp/go-hclog v0.14.0 github.com/jaegertracing/jaeger v1.18.2-0.20200707061226-97d2319ff2be diff --git a/vendor/modules.txt b/vendor/modules.txt index ae1546f72aa..7da2beba7d2 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -740,7 +740,6 @@ github.com/olekukonko/tablewriter # github.com/open-telemetry/opentelemetry-proto v0.4.0 ## explicit github.com/open-telemetry/opentelemetry-proto/gen/go/common/v1 -github.com/open-telemetry/opentelemetry-proto/gen/go/metrics/v1 github.com/open-telemetry/opentelemetry-proto/gen/go/resource/v1 github.com/open-telemetry/opentelemetry-proto/gen/go/trace/v1 # github.com/opentracing-contrib/go-grpc v0.0.0-20191001143057-db30781987df => github.com/pracucci/go-grpc v0.0.0-20201022134131-ef559b8db645 From 7014d8ba806c31ebeae994a3d0df83031066dadc Mon Sep 17 00:00:00 2001 From: Martin Disibio Date: Mon, 8 Feb 2021 13:52:14 -0500 Subject: [PATCH 5/6] Fix vendor --- vendor/modules.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/vendor/modules.txt b/vendor/modules.txt index 7da2beba7d2..1ecdadec2fa 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -509,7 +509,6 @@ github.com/grpc-ecosystem/go-grpc-middleware/util/metautils github.com/grpc-ecosystem/go-grpc-prometheus github.com/grpc-ecosystem/go-grpc-prometheus/packages/grpcstatus # github.com/grpc-ecosystem/grpc-gateway v1.16.0 -## explicit 
github.com/grpc-ecosystem/grpc-gateway/internal github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/options github.com/grpc-ecosystem/grpc-gateway/runtime From b53ce96f23f45bcd83af69e1ce7e2c3c86b10363 Mon Sep 17 00:00:00 2001 From: Martin Disibio Date: Mon, 8 Feb 2021 14:48:38 -0500 Subject: [PATCH 6/6] Declare memberlistKV's new dependency on the server module to fix startup order --- cmd/tempo/app/modules.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/tempo/app/modules.go b/cmd/tempo/app/modules.go index bc052a2cdd2..b903fdf5a00 100644 --- a/cmd/tempo/app/modules.go +++ b/cmd/tempo/app/modules.go @@ -256,7 +256,7 @@ func (t *App) setupModuleManager() error { // Server: nil, // Overrides: nil, // Store: nil, - // MemberlistKV: nil, + MemberlistKV: {Server}, QueryFrontend: {Server}, Ring: {Server, MemberlistKV}, Distributor: {Ring, Server, Overrides},
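
Patches 2 and 6 in this series interact: patch 2 makes initMemberlistKV register an HTTP handler on the server's router (t.server.HTTP.Handle("/memberlist", t.memberlistKV)), which means the memberlist KV module now needs the Server module to be initialized before it. Patch 6 encodes that ordering by replacing the commented-out MemberlistKV entry in setupModuleManager's dependency map with MemberlistKV: {Server}. The sketch below is not Tempo source; only the module names come from the diff above, the rest is a toy stand-in for the real module manager. It mimics the depth-first initialization order such a dependency map produces:

package main

import "fmt"

// deps mirrors the shape of the setupModuleManager map in the diff above;
// the names are real, the mechanism here is illustrative.
var deps = map[string][]string{
    "Server":       {},
    "Overrides":    {},
    "MemberlistKV": {"Server"}, // the one-line change from patch 6
    "Ring":         {"Server", "MemberlistKV"},
    "Distributor":  {"Ring", "Server", "Overrides"},
}

// initModule initializes a module only after all of its dependencies,
// visiting each module at most once (a depth-first traversal).
func initModule(name string, done map[string]bool) {
    if done[name] {
        return
    }
    for _, d := range deps[name] {
        initModule(d, done)
    }
    done[name] = true
    fmt.Println("init", name)
}

func main() {
    // Prints "init Server" before "init MemberlistKV", so the HTTP
    // router exists by the time /memberlist is registered.
    initModule("Distributor", map[string]bool{})
}

Running it prints Server, MemberlistKV, Ring, Overrides, Distributor: the Server-before-MemberlistKV guarantee is exactly what the one-line change in patch 6 provides.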
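
One more note, on the gopkg.in/yaml.v2 bump from v2.3.0 to v2.4.0 recorded in modules.txt above: per the vendored yaml.go diff, v2.4.0 reverts the line-wrapping default that v2.3.0 changed erroneously, and adds FutureLineWrap() so callers can opt back into non-wrapped output. A minimal usage sketch (the key and the long value are made up for illustration):

package main

import (
    "fmt"

    yaml "gopkg.in/yaml.v2"
)

func main() {
    // Globally disable wrapping of long scalars, matching the accidental
    // v2.3.0 behavior and the v3 default; without this call the v2.4.0
    // emitter wraps long lines again.
    yaml.FutureLineWrap()

    out, err := yaml.Marshal(map[string]string{
        "note": "a deliberately long value that the emitter would otherwise wrap across several lines",
    })
    if err != nil {
        panic(err)
    }
    fmt.Print(string(out))
}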